// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */
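
/* Illustrative userspace usage of this action (a sketch; exact flags
 * depend on the iproute2 version and the surrounding filter setup):
 *
 *   tc filter add dev eth0 ingress proto ip flower ct_state -trk \
 *           action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress chain 1 proto ip flower \
 *           ct_state +trk+est action mirred egress redirect dev eth1
 */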

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check if the inverted reverse tuple
 * (target) is different than the tuple of the current direction - meaning
 * NAT for ports and/or IP addresses is needed - and add the relevant
 * mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}
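
/* Worked example (hypothetical addresses): for SNAT 10.0.0.1 -> 192.0.2.1,
 * the ORIGINAL-direction tuple has src 10.0.0.1 while the inverted REPLY
 * tuple (target) has src 192.0.2.1, so the first memcmp() differs and one
 * mangle action is emitted rewriting iphdr->saddr to 192.0.2.1; daddr is
 * left alone.
 */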

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}
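
/* A flow_action mangle carries at most 32 bits of data, so a 128-bit IPv6
 * address is rewritten as four consecutive 32-bit mangles at byte offsets
 * +0, +4, +8 and +12 from saddr/daddr.
 */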

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}
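
/* The cookie above uses the same encoding as skb->_nfct (see nf_ct_set()):
 * the nf_conn pointer is aligned enough that its low bits are free to
 * carry the ctinfo value, so a driver can hand the cookie back and have it
 * decoded like an skb conntrack reference.
 */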

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action = tcf_ct_flow_table_fill_actions,
	.owner = THIS_MODULE,
};

static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}
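
/* All ct action instances sharing a conntrack zone share a single
 * tcf_ct_flow_table: the first instance in a zone allocates it and inserts
 * it into zones_ht keyed by zone id; later instances only take a reference
 * through the refcount_inc_not_zero() lookup above.
 */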

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct flow_block_cb *block_cb, *tmp_cb;
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	/* Remove any remaining callbacks before cleanup */
	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}
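
/* Summary of the eligibility rules above: only fully ESTABLISHED TCP
 * connections and UDP connections seen in both directions are offloaded,
 * and never while a conntrack helper or TCP sequence adjustment is
 * attached, since both need per-packet work that the flow table bypasses.
 */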

static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}
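
/* The fast path is deliberately narrow: thoff != sizeof(struct iphdr)
 * rejects packets carrying IP options, fragments never match, and packets
 * with TTL <= 1 are left to the slow path so TTL expiry is handled there.
 */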

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	nf_ct_acct_update(ct, dir, skb->len);

	return true;
}
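
/* On a hit, the skb leaves with a conntrack reference attached via
 * nf_ct_set() and the flow timeout refreshed, so the caller can skip the
 * full conntrack lookup. A TCP FIN or RST instead tears the flow down,
 * pushing connection close back through the slow path where conntrack can
 * track the state transitions.
 */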

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}
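
/* Worked example: a 40-byte UDP/IPv4 datagram gets padded to the 46-byte
 * minimum Ethernet payload, so after the MAC header is pulled skb->len is
 * 46 while tot_len says 40; pskb_trim_rcsum() drops the 6 padding bytes
 * (and adjusts CHECKSUM_COMPLETE skbs) so checksum verification only sees
 * real payload.
 */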

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct qdisc_skb_cb cb;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

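	/* ip_defrag()/nf_ct_frag6_gather() use IPCB()/IP6CB(), which alias
	 * the qdisc control block in skb->cb, so save it across the call
	 * and hold an extra skb reference since defrag may consume the skb.
	 */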
	skb_get(skb);
	cb = *qdisc_skb_cb(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			cb.mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			cb.mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	*qdisc_skb_cb(skb) = cb;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) static void tcf_ct_params_free(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) struct tcf_ct_params *params = container_of(head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) struct tcf_ct_params, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) tcf_ct_flow_table_put(params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (params->tmpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) nf_conntrack_put(¶ms->tmpl->ct_general);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) kfree(params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) #if IS_ENABLED(CONFIG_NF_NAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* Modelled after nf_nat_ipv[46]_fn().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * range is only used for new, uninitialized NAT state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * Returns either NF_ACCEPT or NF_DROP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) enum ip_conntrack_info ctinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) const struct nf_nat_range2 *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) enum nf_nat_manip_type maniptype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) __be16 proto = skb_protocol(skb, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int hooknum, err = NF_ACCEPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /* See HOOK2MANIP(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (maniptype == NF_NAT_MANIP_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) hooknum = NF_INET_LOCAL_IN; /* Source NAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) switch (ctinfo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) case IP_CT_RELATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) case IP_CT_RELATED_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (proto == htons(ETH_P_IP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) ip_hdr(skb)->protocol == IPPROTO_ICMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) hooknum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) err = NF_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) __be16 frag_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) u8 nexthdr = ipv6_hdr(skb)->nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int hdrlen = ipv6_skip_exthdr(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) sizeof(struct ipv6hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) &nexthdr, &frag_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (!nf_nat_icmpv6_reply_translation(skb, ct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) ctinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) hooknum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) hdrlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) err = NF_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /* Non-ICMP, fall thru to initialize if needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) case IP_CT_NEW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /* Seen it before? This can happen for loopback, retrans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * or local packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (!nf_nat_initialized(ct, maniptype)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* Initialize according to the NAT action. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /* Action is set up to establish a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ? nf_nat_setup_info(ct, range, maniptype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) : nf_nat_alloc_null_binding(ct, hooknum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (err != NF_ACCEPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) case IP_CT_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) case IP_CT_ESTABLISHED_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) err = NF_DROP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) err = nf_nat_packet(ct, ctinfo, hooknum, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) #endif /* CONFIG_NF_NAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) u32 new_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (!mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
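	/* Combine the action's mark with the ct mark bits outside the mask,
	 * and only generate a change event for confirmed connections.
	 */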
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) new_mark = mark | (ct->mark & ~(mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (ct->mark != new_mark) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ct->mark = new_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (nf_ct_is_confirmed(ct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) nf_conntrack_event_cache(IPCT_MARK, ct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) static void tcf_ct_act_set_labels(struct nf_conn *ct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) u32 *labels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) u32 *labels_m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) #if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (!memchr_inv(labels_m, 0, labels_sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
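	/* The label area is 128 bits wide; nf_connlabels_replace() takes its
	 * size as 4 x 32-bit words.
	 */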
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) nf_connlabels_replace(ct, labels, labels_m, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static int tcf_ct_act_nat(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct nf_conn *ct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) enum ip_conntrack_info ctinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) int ct_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct nf_nat_range2 *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) bool commit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) #if IS_ENABLED(CONFIG_NF_NAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) enum nf_nat_manip_type maniptype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (!(ct_action & TCA_CT_ACT_NAT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return NF_ACCEPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* Add NAT extension if not confirmed yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return NF_DROP; /* Can't NAT. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) (ctinfo != IP_CT_RELATED || commit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* NAT an established or related connection like before. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* This is the REPLY direction for a connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * for which NAT was applied in the forward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * direction. Do the reverse NAT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) maniptype = ct->status & IPS_SRC_NAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) maniptype = ct->status & IPS_SRC_NAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) ? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) } else if (ct_action & TCA_CT_ACT_NAT_SRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) maniptype = NF_NAT_MANIP_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) } else if (ct_action & TCA_CT_ACT_NAT_DST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) maniptype = NF_NAT_MANIP_DST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return NF_ACCEPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
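	/* A connection can carry both SNAT and DNAT, and one ct_nat_execute()
	 * pass only performs a single manipulation: run a second pass with
	 * the other manip type. For DNAT-only connections seen in the
	 * original direction, do a source pass with a null range so the null
	 * source binding is set up and applied as well.
	 */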
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (ct->status & IPS_SRC_NAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (maniptype == NF_NAT_MANIP_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) maniptype = NF_NAT_MANIP_DST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) maniptype = NF_NAT_MANIP_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) err = ct_nat_execute(skb, ct, ctinfo, range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) maniptype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) err = ct_nat_execute(skb, ct, ctinfo, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) NF_NAT_MANIP_SRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return NF_ACCEPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct tcf_result *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct net *net = dev_net(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bool cached, commit, clear, force;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) enum ip_conntrack_info ctinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct tcf_ct *c = to_ct(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct nf_conn *tmpl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct nf_hook_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) int nh_ofs, err, retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct tcf_ct_params *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) bool skip_add = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) bool defrag = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct nf_conn *ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u8 family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) p = rcu_dereference_bh(c->params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) retval = READ_ONCE(c->tcf_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) commit = p->ct_action & TCA_CT_ACT_COMMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) clear = p->ct_action & TCA_CT_ACT_CLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) force = p->ct_action & TCA_CT_ACT_FORCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) tmpl = p->tmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) tcf_lastuse_update(&c->tcf_tm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
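	/* ct clear: detach any conntrack entry and mark the skb untracked. */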
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (clear) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ct = nf_ct_get(skb, &ctinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (ct) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) nf_conntrack_put(&ct->ct_general);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) family = tcf_ct_skb_nf_family(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (family == NFPROTO_UNSPEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /* The conntrack module expects to be working at L3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * We also try to pull the IPv4/6 header into the linear area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) nh_ofs = skb_network_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) skb_pull_rcsum(skb, nh_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (err == -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) retval = TC_ACT_STOLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) err = tcf_ct_skb_network_trim(skb, family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* If we are recirculating packets to match on ct fields and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * committing with a separate ct action, then we don't need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * actually run the packet through conntrack twice unless it's for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * different zone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (!cached) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (tcf_ct_flow_table_lookup(p, skb, family)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) skip_add = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) goto do_nat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Associate skb with specified zone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (tmpl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ct = nf_ct_get(skb, &ctinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (skb_nfct(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) nf_conntrack_put(skb_nfct(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) nf_conntrack_get(&tmpl->ct_general);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) nf_ct_set(skb, tmpl, IP_CT_NEW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
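		/* Feed the packet to conntrack as if it arrived at PRE_ROUTING. */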
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) state.hook = NF_INET_PRE_ROUTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) state.net = net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) state.pf = family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) err = nf_conntrack_in(skb, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (err != NF_ACCEPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) goto out_push;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) do_nat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ct = nf_ct_get(skb, &ctinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!ct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) goto out_push;
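	/* Deliver ct events that were cached while processing this skb. */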
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) nf_ct_deliver_cached_events(ct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (err != NF_ACCEPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (commit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /* This will take care of sending queued events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * even if the connection is already confirmed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (nf_conntrack_confirm(skb) != NF_ACCEPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!skip_add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) out_push:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) skb_push_rcsum(skb, nh_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) tcf_action_update_bstats(&c->common, skb);
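	/* Defragmentation changed skb->len; refresh the qdisc cb pkt_len. */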
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (defrag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) qdisc_skb_cb(skb)->pkt_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) tcf_action_inc_drop_qstats(&c->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return TC_ACT_SHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) [TCA_CT_ACTION] = { .type = NLA_U16 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) [TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) [TCA_CT_ZONE] = { .type = NLA_U16 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) [TCA_CT_MARK] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) [TCA_CT_MARK_MASK] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) [TCA_CT_LABELS] = { .type = NLA_BINARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) .len = 128 / BITS_PER_BYTE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) [TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) .len = 128 / BITS_PER_BYTE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) [TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) [TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) [TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) [TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) [TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) [TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) };
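/* Illustrative only (device and chain numbers are examples, not taken from
 * this file): a tc(8) command that exercises these attributes, committing a
 * new connection into zone 1 with source NAT:
 *
 *   tc filter add dev eth0 ingress chain 1 proto ip flower \
 *       ct_state +trk+new \
 *       action ct commit zone 1 nat src addr 10.0.0.1 port 1000-2000 pipe \
 *       action goto chain 2
 */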
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct tc_ct *parm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct nlattr **tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct nf_nat_range2 *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (!(p->ct_action & TCA_CT_ACT_NAT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (!IS_ENABLED(CONFIG_NF_NAT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) NL_SET_ERR_MSG_MOD(extack, "Netfilter NAT isn't enabled in the kernel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) (p->ct_action & TCA_CT_ACT_NAT_DST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) range = &p->range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (tb[TCA_CT_NAT_IPV4_MIN]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) p->ipv4_range = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) range->flags |= NF_NAT_RANGE_MAP_IPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) range->min_addr.ip =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) range->max_addr.ip = max_attr ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) nla_get_in_addr(max_attr) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) range->min_addr.ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) } else if (tb[TCA_CT_NAT_IPV6_MIN]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) p->ipv4_range = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) range->flags |= NF_NAT_RANGE_MAP_IPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) range->min_addr.in6 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) range->max_addr.in6 = max_attr ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) nla_get_in6_addr(max_attr) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) range->min_addr.in6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (tb[TCA_CT_NAT_PORT_MIN]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) range->min_proto.all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
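/* Copy an attribute into @val and, when a mask pointer is given, fill
 * @mask from the mask attribute or default it to all-ones.
 */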
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static void tcf_ct_set_key_val(struct nlattr **tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) void *val, int val_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) void *mask, int mask_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!tb[val_type])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) nla_memcpy(val, tb[val_type], len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) memset(mask, 0xff, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) nla_memcpy(mask, tb[mask_type], len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static int tcf_ct_fill_params(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct tcf_ct_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct tc_ct *parm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct nlattr **tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct nf_conntrack_zone zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct nf_conn *tmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) p->zone = NF_CT_DEFAULT_ZONE_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) tcf_ct_set_key_val(tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) &p->ct_action, TCA_CT_ACTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) NULL, TCA_CT_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) sizeof(p->ct_action));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (p->ct_action & TCA_CT_ACT_CLEAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) err = tcf_ct_fill_params_nat(p, parm, tb, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (tb[TCA_CT_MARK]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) tcf_ct_set_key_val(tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) &p->mark, TCA_CT_MARK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) &p->mark_mask, TCA_CT_MARK_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) sizeof(p->mark));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (tb[TCA_CT_LABELS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) NL_SET_ERR_MSG_MOD(extack, "Conntrack labels aren't enabled.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (!tn->labels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) tcf_ct_set_key_val(tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) p->labels, TCA_CT_LABELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) p->labels_mask, TCA_CT_LABELS_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) sizeof(p->labels));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (tb[TCA_CT_ZONE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) NL_SET_ERR_MSG_MOD(extack, "Conntrack zones aren't enabled.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) tcf_ct_set_key_val(tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) &p->zone, TCA_CT_ZONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) NULL, TCA_CT_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) sizeof(p->zone));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (!tmpl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
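	/* Conntrack templates are never inserted into the hash table, so
	 * mark this one confirmed up front; p->tmpl owns the reference
	 * taken below.
	 */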
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) nf_conntrack_get(&tmpl->ct_general);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) p->tmpl = tmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static int tcf_ct_init(struct net *net, struct nlattr *nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct nlattr *est, struct tc_action **a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) int replace, int bind, bool rtnl_held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct tcf_proto *tp, u32 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct tc_action_net *tn = net_generic(net, ct_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct tcf_ct_params *params = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct nlattr *tb[TCA_CT_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) struct tcf_chain *goto_ch = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct tc_ct *parm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) struct tcf_ct *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) int err, res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (!nla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (!tb[TCA_CT_PARMS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) parm = nla_data(tb[TCA_CT_PARMS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) index = parm->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) err = tcf_idr_check_alloc(tn, &index, a, bind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) err = tcf_idr_create_from_flags(tn, index, est, a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) &act_ct_ops, bind, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) tcf_idr_cleanup(tn, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) res = ACT_P_CREATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (!replace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) tcf_idr_release(*a, bind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) c = to_ct(*a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) params = kzalloc(sizeof(*params), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (unlikely(!params)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) err = tcf_ct_fill_params(net, params, parm, tb, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) err = tcf_ct_flow_table_get(params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) spin_lock_bh(&c->tcf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) params = rcu_replace_pointer(c->params, params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) lockdep_is_held(&c->tcf_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) spin_unlock_bh(&c->tcf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
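	/* params now holds the old parameters (if any); free them after an
	 * RCU grace period, and drop the replaced goto chain reference.
	 */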
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (goto_ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) tcf_chain_put_by_act(goto_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) call_rcu(&params->rcu, tcf_ct_params_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (goto_ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) tcf_chain_put_by_act(goto_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) kfree(params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) tcf_idr_release(*a, bind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static void tcf_ct_cleanup(struct tc_action *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct tcf_ct_params *params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) struct tcf_ct *c = to_ct(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) params = rcu_dereference_protected(c->params, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) call_rcu(&params->rcu, tcf_ct_params_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static int tcf_ct_dump_key_val(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) void *val, int val_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) void *mask, int mask_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (mask && !memchr_inv(mask, 0, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) err = nla_put(skb, val_type, len, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (mask_type != TCA_CT_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) err = nla_put(skb, mask_type, len, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct nf_nat_range2 *range = &p->range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (!(p->ct_action & TCA_CT_ACT_NAT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (range->flags & NF_NAT_RANGE_MAP_IPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (p->ipv4_range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) range->min_addr.ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) range->max_addr.ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) &range->min_addr.in6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) &range->max_addr.in6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) range->min_proto.all))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) range->max_proto.all))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) int bind, int ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) unsigned char *b = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) struct tcf_ct *c = to_ct(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct tcf_ct_params *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct tc_ct opt = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) .index = c->tcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) .refcnt = refcount_read(&c->tcf_refcnt) - ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) .bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct tcf_t t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) spin_lock_bh(&c->tcf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) p = rcu_dereference_protected(c->params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) lockdep_is_held(&c->tcf_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) opt.action = c->tcf_action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (tcf_ct_dump_key_val(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) &p->ct_action, TCA_CT_ACTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) NULL, TCA_CT_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) sizeof(p->ct_action)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (p->ct_action & TCA_CT_ACT_CLEAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) goto skip_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) tcf_ct_dump_key_val(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) &p->mark, TCA_CT_MARK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) &p->mark_mask, TCA_CT_MARK_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) sizeof(p->mark)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) tcf_ct_dump_key_val(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) p->labels, TCA_CT_LABELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) p->labels_mask, TCA_CT_LABELS_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) sizeof(p->labels)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) tcf_ct_dump_key_val(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) &p->zone, TCA_CT_ZONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) NULL, TCA_CT_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) sizeof(p->zone)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (tcf_ct_dump_nat(skb, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) skip_dump:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) tcf_tm_dump(&t, &c->tcf_tm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) spin_unlock_bh(&c->tcf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) spin_unlock_bh(&c->tcf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) nlmsg_trim(skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct netlink_callback *cb, int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) const struct tc_action_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct tc_action_net *tn = net_generic(net, ct_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return tcf_generic_walker(tn, skb, cb, type, ops, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct tc_action_net *tn = net_generic(net, ct_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return tcf_idr_search(tn, a, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) u64 drops, u64 lastuse, bool hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) struct tcf_ct *c = to_ct(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) tcf_action_update_stats(a, bytes, packets, drops, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) static struct tc_action_ops act_ct_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) .kind = "ct",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) .id = TCA_ID_CT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) .act = tcf_ct_act,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) .dump = tcf_ct_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) .init = tcf_ct_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) .cleanup = tcf_ct_cleanup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) .walk = tcf_ct_walker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) .lookup = tcf_ct_search,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) .stats_update = tcf_stats_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) .size = sizeof(struct tcf_ct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) static __net_init int ct_init_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
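	/* nf_connlabels_get() takes the highest bit number in use, so pass
	 * n_bits - 1 (127) to make all 128 label bits available.
	 */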
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (nf_connlabels_get(net, n_bits - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) tn->labels = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) pr_err("act_ct: Failed to set connlabels length\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) tn->labels = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return tc_action_net_init(net, &tn->tn, &act_ct_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static void __net_exit ct_exit_net(struct list_head *net_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) list_for_each_entry(net, net_list, exit_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (tn->labels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) nf_connlabels_put(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) tc_action_net_exit(net_list, ct_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static struct pernet_operations ct_net_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) .init = ct_init_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) .exit_batch = ct_exit_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) .id = &ct_net_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) .size = sizeof(struct tc_ct_action_net),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static int __init ct_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (!act_ct_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) err = tcf_ct_flow_tables_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) goto err_tbl_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) err = tcf_register_action(&act_ct_ops, &ct_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) goto err_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) err_register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) tcf_ct_flow_tables_uninit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) err_tbl_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) destroy_workqueue(act_ct_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) static void __exit ct_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) tcf_unregister_action(&act_ct_ops, &ct_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) tcf_ct_flow_tables_uninit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) destroy_workqueue(act_ct_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) module_init(ct_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) module_exit(ct_cleanup_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) MODULE_DESCRIPTION("Connection tracking action");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) MODULE_LICENSE("GPL v2");