// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

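/* Deferred execution machinery: actions that recirculate or clone a packet
 * must not recurse without bound on the kernel stack.  Execution may nest
 * up to OVS_RECURSION_LIMIT levels; past OVS_DEFERRED_ACTION_THRESHOLD the
 * remaining work is queued on a small per-CPU FIFO and run after the
 * current action list finishes.  A packet whose work cannot be queued
 * (FIFO full) is dropped.
 */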
#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space.  Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return the queued entry on success, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *actions,
						    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

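/* Actions that rewrite parts of the packet not tracked in 'key' mark the
 * key invalid; a later user (e.g. recirculation) must then re-extract the
 * flow key from the packet instead of trusting the cached one.
 */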
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

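/* Push an MPLS label stack entry.  A zero 'mac_len' means the packet has
 * no Ethernet header, so the flow key switches to MAC_PROTO_NONE.
 */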
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}

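/* Pop the outermost MPLS label stack entry, replacing the Ethertype with
 * 'ethertype'.  ETH_P_TEB indicates that an Ethernet frame follows the
 * popped label.
 */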
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

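/* Masked rewrite of the outermost MPLS label stack entry. */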
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}

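/* Strip the outermost VLAN tag.  If another tag is still present
 * afterwards, the flow key no longer matches the packet and must be
 * invalidated.
 */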
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

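/* Push a VLAN tag.  If the packet already carries a tag, the existing one
 * is pushed into the packet data and the flow key is invalidated.
 */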
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	int err;

	err = skb_eth_push(skb, ethh->addresses.eth_dst,
			   ethh->addresses.eth_src);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct nshhdr *nh)
{
	int err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

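/* Fix up the TCP/UDP checksum after an IPv4 address rewrite.  Non-first
 * fragments carry no L4 header, so they are left alone; a zero UDP
 * checksum means "no checksum" and is likewise left untouched unless the
 * hardware still has to fill it in (CHECKSUM_PARTIAL).
 */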
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

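/* IPv6 counterpart of update_ip_l4_checksum().  The pseudo-header covers
 * the addresses for TCP, UDP and ICMPv6, so all three need their checksum
 * adjusted when an address changes.
 */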
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

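/* Masked rewrite of the IPv6 traffic class, with a skb->csum adjustment
 * when the full packet checksum is being tracked (CHECKSUM_COMPLETE).
 */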
static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

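/* Masked rewrite of the UDP ports.  The checksum is optional over IPv4:
 * it is only updated when already in use (or still owed by hardware), and
 * a recomputed value of zero is stored as CSUM_MANGLED_0 so the packet is
 * not mistaken for one carrying no checksum.
 */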
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

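/* Masked rewrite of the SCTP ports.  SCTP uses a CRC32c over the whole
 * packet, which cannot be updated incrementally, so it is recomputed
 * before and after the rewrite.
 */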
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

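/* Per-fragment output callback for the IP fragmentation paths: restore
 * the L2 header and skb metadata stashed by prepare_frag() onto each
 * fragment, then hand it to the vport.
 */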
static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

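/* Fragmentation borrows the stock IP/IPv6 fragmentation code, which reads
 * the MTU from the packet's dst; a minimal fake dst provides the egress
 * device's MTU.
 */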
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per larger-than-MTU frame; its inverse is
 * ovs_vport_output(), which is called once per fragment.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) static void prepare_frag(struct vport *vport, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) u16 orig_network_offset, u8 mac_proto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) unsigned int hlen = skb_network_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct ovs_frag_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) data = this_cpu_ptr(&ovs_frag_data_storage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) data->dst = skb->_skb_refdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) data->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) data->cb = *OVS_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) data->inner_protocol = skb->inner_protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) data->network_offset = orig_network_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (skb_vlan_tag_present(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) data->vlan_tci = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) data->vlan_proto = skb->vlan_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) data->mac_proto = mac_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) data->l2_len = hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) memcpy(&data->l2_data, skb->data, hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) skb_pull(skb, hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
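/* Fragment 'skb' down to the maximum receive unit 'mru' and transmit the
 * fragments via 'vport'.  A fake, uncounted dst pointing at the egress
 * device is installed so the stock IPv4/IPv6 fragmentation paths can be
 * reused; non-IP packets cannot be fragmented and are dropped.
 */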
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) static void ovs_fragment(struct net *net, struct vport *vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct sk_buff *skb, u16 mru,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct sw_flow_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) u16 orig_network_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (eth_p_mpls(skb->protocol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) orig_network_offset = skb_network_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) skb->network_header = skb->inner_network_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (skb_network_offset(skb) > MAX_L2_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) OVS_NLERR(1, "L2 header too long to fragment");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (key->eth.type == htons(ETH_P_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct rtable ovs_rt = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) unsigned long orig_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) prepare_frag(vport, skb, orig_network_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ovs_key_mac_proto(key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) DST_OBSOLETE_NONE, DST_NOCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ovs_rt.dst.dev = vport->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) orig_dst = skb->_skb_refdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) skb_dst_set_noref(skb, &ovs_rt.dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) IPCB(skb)->frag_max_size = mru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) refdst_drop(orig_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) } else if (key->eth.type == htons(ETH_P_IPV6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) unsigned long orig_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct rt6_info ovs_rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) prepare_frag(vport, skb, orig_network_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ovs_key_mac_proto(key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) memset(&ovs_rt, 0, sizeof(ovs_rt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) DST_OBSOLETE_NONE, DST_NOCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) ovs_rt.dst.dev = vport->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) orig_dst = skb->_skb_refdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) skb_dst_set_noref(skb, &ovs_rt.dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) IP6CB(skb)->frag_max_size = mru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) refdst_drop(orig_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) ovs_vport_name(vport), ntohs(key->eth.type), mru,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) vport->dev->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
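/* Send 'skb' out on the vport identified by 'out_port', trimming it first
 * if a truncation length is pending and fragmenting it if it exceeds the
 * flow's MRU.  Consumes 'skb' on every path.
 */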
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct sw_flow_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct vport *vport = ovs_vport_rcu(dp, out_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (likely(vport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) u16 mru = OVS_CB(skb)->mru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) u32 cutlen = OVS_CB(skb)->cutlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (unlikely(cutlen > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (skb->len - cutlen > ovs_mac_header_len(key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pskb_trim(skb, skb->len - cutlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) pskb_trim(skb, ovs_mac_header_len(key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (likely(!mru ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) (skb->len <= mru + vport->dev->hard_header_len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) } else if (mru <= vport->dev->mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct net *net = read_pnet(&dp->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ovs_fragment(net, vport, skb, mru, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
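/* Build a dp_upcall_info from the nested OVS_USERSPACE_ATTR_* attributes
 * and pass the packet up to user space through ovs_dp_upcall().
 */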
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static int output_userspace(struct datapath *dp, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct sw_flow_key *key, const struct nlattr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) const struct nlattr *actions, int actions_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) uint32_t cutlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct dp_upcall_info upcall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) const struct nlattr *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) memset(&upcall, 0, sizeof(upcall));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) upcall.cmd = OVS_PACKET_CMD_ACTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) upcall.mru = OVS_CB(skb)->mru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) a = nla_next(a, &rem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) switch (nla_type(a)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) case OVS_USERSPACE_ATTR_USERDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) upcall.userdata = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) case OVS_USERSPACE_ATTR_PID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) upcall.portid = nla_get_u32(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) vport = ovs_vport_rcu(dp, nla_get_u32(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (vport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) err = dev_fill_metadata_dst(vport->dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) upcall.egress_tun_info = skb_tunnel_info(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) case OVS_USERSPACE_ATTR_ACTIONS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /* Include actions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) upcall.actions = actions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) upcall.actions_len = actions_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) } /* End of switch. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
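/* Called when dec_ttl hits a TTL/hop limit of one: run the nested actions
 * if any were configured, otherwise quietly consume the packet.
 */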
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct sw_flow_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) const struct nlattr *attr, bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct nlattr *actions = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (nla_len(actions))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return clone_execute(dp, skb, key, 0, nla_data(actions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) nla_len(actions), last, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static int sample(struct datapath *dp, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct sw_flow_key *key, const struct nlattr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct nlattr *actions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct nlattr *sample_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int rem = nla_len(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) const struct sample_arg *arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) bool clone_flow_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) sample_arg = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) arg = nla_data(sample_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) actions = nla_next(sample_arg, &rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if ((arg->probability != U32_MAX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) (!arg->probability || prandom_u32() > arg->probability)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) clone_flow_key = !arg->exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return clone_execute(dp, skb, key, 0, actions, rem, last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) clone_flow_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static int clone(struct datapath *dp, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct sw_flow_key *key, const struct nlattr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct nlattr *actions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct nlattr *clone_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) int rem = nla_len(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) bool dont_clone_flow_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* The first action is always 'OVS_CLONE_ATTR_ARG'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) clone_arg = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) dont_clone_flow_key = nla_get_u32(clone_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) actions = nla_next(clone_arg, &rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return clone_execute(dp, skb, key, 0, actions, rem, last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) !dont_clone_flow_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
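/* Compute an L4 hash of 'skb', fold in the basis from the action and
 * store the (never zero) result in 'key->ovs_flow_hash'.
 */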
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) const struct nlattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct ovs_action_hash *hash_act = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) u32 hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) hash = skb_get_hash(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) hash = jhash_1word(hash, hash_act->hash_basis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (!hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) hash = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) key->ovs_flow_hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static int execute_set_action(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct sw_flow_key *flow_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) const struct nlattr *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* Only tunnel set execution is supported without a mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct ovs_tunnel_info *tun = nla_data(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) skb_dst_drop(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) dst_hold((struct dst_entry *)tun->tun_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* Mask is at the midpoint of the data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #define get_mask(a, type) ((const type)nla_data(a) + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
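/* Apply a masked set-field action: each handler rewrites only the header
 * bits selected by the mask and mirrors the result into 'flow_key'.
 */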
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static int execute_masked_set_action(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct sw_flow_key *flow_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) const struct nlattr *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) switch (nla_type(a)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) case OVS_KEY_ATTR_PRIORITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) OVS_SET_MASKED(skb->priority, nla_get_u32(a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) *get_mask(a, u32 *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) flow_key->phy.priority = skb->priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) case OVS_KEY_ATTR_SKB_MARK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) flow_key->phy.skb_mark = skb->mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) case OVS_KEY_ATTR_TUNNEL_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /* Masked data not supported for tunnel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) case OVS_KEY_ATTR_ETHERNET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) err = set_eth_addr(skb, flow_key, nla_data(a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) get_mask(a, struct ovs_key_ethernet *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) case OVS_KEY_ATTR_NSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) err = set_nsh(skb, flow_key, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) case OVS_KEY_ATTR_IPV4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) err = set_ipv4(skb, flow_key, nla_data(a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) get_mask(a, struct ovs_key_ipv4 *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) case OVS_KEY_ATTR_IPV6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) err = set_ipv6(skb, flow_key, nla_data(a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) get_mask(a, struct ovs_key_ipv6 *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) case OVS_KEY_ATTR_TCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) err = set_tcp(skb, flow_key, nla_data(a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) get_mask(a, struct ovs_key_tcp *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) case OVS_KEY_ATTR_UDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) err = set_udp(skb, flow_key, nla_data(a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) get_mask(a, struct ovs_key_udp *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) case OVS_KEY_ATTR_SCTP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) err = set_sctp(skb, flow_key, nla_data(a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) get_mask(a, struct ovs_key_sctp *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) case OVS_KEY_ATTR_MPLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) __be32 *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) case OVS_KEY_ATTR_CT_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) case OVS_KEY_ATTR_CT_ZONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) case OVS_KEY_ATTR_CT_MARK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) case OVS_KEY_ATTR_CT_LABELS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
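/* Revalidate the flow key if necessary, then feed the packet back into
 * the datapath lookup with the new recirculation id via clone_execute().
 */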
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct sw_flow_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) const struct nlattr *a, bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) u32 recirc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (!is_flow_key_valid(key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) err = ovs_flow_key_update(skb, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) BUG_ON(!is_flow_key_valid(key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) recirc_id = nla_get_u32(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
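/* Compare the packet length (using the pre-fragmentation length when the
 * packet was reassembled) against the configured threshold and run the
 * matching nested action list.
 */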
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct sw_flow_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) const struct nlattr *attr, bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) const struct nlattr *actions, *cpl_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int len, max_len, rem = nla_len(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) const struct check_pkt_len_arg *arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) bool clone_flow_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /* The first netlink attribute in 'attr' is always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) cpl_arg = nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) arg = nla_data(cpl_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) max_len = arg->pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) len <= max_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /* Second netlink attribute in 'attr' is always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) actions = nla_next(cpl_arg, &rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) clone_flow_key = !arg->exec_for_lesser_equal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /* Third netlink attribute in 'attr' is always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) actions = nla_next(cpl_arg, &rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) actions = nla_next(actions, &rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) clone_flow_key = !arg->exec_for_greater;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) return clone_execute(dp, skb, key, 0, nla_data(actions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) nla_len(actions), last, clone_flow_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
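/* Decrement the IPv4 TTL or IPv6 hop limit in place, fixing up the IPv4
 * checksum and the flow key.  Returns -EHOSTUNREACH when the TTL would
 * reach zero so the caller can run the exception actions instead.
 */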
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (skb->protocol == htons(ETH_P_IPV6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct ipv6hdr *nh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) err = skb_ensure_writable(skb, skb_network_offset(skb) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) sizeof(*nh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) nh = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (nh->hop_limit <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) key->ip.ttl = --nh->hop_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) } else if (skb->protocol == htons(ETH_P_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct iphdr *nh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) u8 old_ttl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) err = skb_ensure_writable(skb, skb_network_offset(skb) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) sizeof(*nh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) nh = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (nh->ttl <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) old_ttl = nh->ttl--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) csum_replace2(&nh->check, htons(old_ttl << 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) htons(nh->ttl << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) key->ip.ttl = nh->ttl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* Execute a list of actions against 'skb'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct sw_flow_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) const struct nlattr *attr, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) const struct nlattr *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) int rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) for (a = attr, rem = len; rem > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) a = nla_next(a, &rem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) switch (nla_type(a)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) case OVS_ACTION_ATTR_OUTPUT: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) int port = nla_get_u32(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct sk_buff *clone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
			/* Every output action needs a separate clone of
			 * 'skb'; in case the output action is the last
			 * action, cloning can be avoided.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (nla_is_last(a, rem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) clone = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (clone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) do_output(dp, clone, port, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) OVS_CB(skb)->cutlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) case OVS_ACTION_ATTR_TRUNC: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) struct ovs_action_trunc *trunc = nla_data(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (skb->len > trunc->max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) case OVS_ACTION_ATTR_USERSPACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) output_userspace(dp, skb, key, a, attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) len, OVS_CB(skb)->cutlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) OVS_CB(skb)->cutlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) case OVS_ACTION_ATTR_HASH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) execute_hash(skb, key, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) case OVS_ACTION_ATTR_PUSH_MPLS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct ovs_action_push_mpls *mpls = nla_data(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) err = push_mpls(skb, key, mpls->mpls_lse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) mpls->mpls_ethertype, skb->mac_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) case OVS_ACTION_ATTR_ADD_MPLS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) struct ovs_action_add_mpls *mpls = nla_data(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) __u16 mac_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) mac_len = skb->mac_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) err = push_mpls(skb, key, mpls->mpls_lse,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) mpls->mpls_ethertype, mac_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) case OVS_ACTION_ATTR_POP_MPLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) err = pop_mpls(skb, key, nla_get_be16(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) case OVS_ACTION_ATTR_PUSH_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) err = push_vlan(skb, key, nla_data(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) case OVS_ACTION_ATTR_POP_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) err = pop_vlan(skb, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) case OVS_ACTION_ATTR_RECIRC: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) bool last = nla_is_last(a, rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) err = execute_recirc(dp, skb, key, a, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (last) {
				/* If this is the last action, the skb has been
				 * consumed or freed; return immediately.
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) case OVS_ACTION_ATTR_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) err = execute_set_action(skb, key, nla_data(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) case OVS_ACTION_ATTR_SET_MASKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) case OVS_ACTION_ATTR_SET_TO_MASKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) err = execute_masked_set_action(skb, key, nla_data(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) case OVS_ACTION_ATTR_SAMPLE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) bool last = nla_is_last(a, rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) err = sample(dp, skb, key, a, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) case OVS_ACTION_ATTR_CT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (!is_flow_key_valid(key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) err = ovs_flow_key_update(skb, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) nla_data(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /* Hide stolen IP fragments from user space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return err == -EINPROGRESS ? 0 : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) case OVS_ACTION_ATTR_CT_CLEAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) err = ovs_ct_clear(skb, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) case OVS_ACTION_ATTR_PUSH_ETH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) err = push_eth(skb, key, nla_data(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) case OVS_ACTION_ATTR_POP_ETH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) err = pop_eth(skb, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) case OVS_ACTION_ATTR_PUSH_NSH: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) u8 buffer[NSH_HDR_MAX_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) struct nshhdr *nh = (struct nshhdr *)buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) err = nsh_hdr_from_nlattr(nla_data(a), nh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) NSH_HDR_MAX_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) err = push_nsh(skb, key, nh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) case OVS_ACTION_ATTR_POP_NSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) err = pop_nsh(skb, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) case OVS_ACTION_ATTR_METER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) case OVS_ACTION_ATTR_CLONE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) bool last = nla_is_last(a, rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) err = clone(dp, skb, key, a, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) bool last = nla_is_last(a, rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) err = execute_check_pkt_len(dp, skb, key, a, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) case OVS_ACTION_ATTR_DEC_TTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) err = execute_dec_ttl(skb, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (err == -EHOSTUNREACH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) err = dec_ttl_exception_handler(dp, skb, key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) a, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
/* Execute the actions on a clone of the packet.  The execution affects
 * neither the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) static int clone_execute(struct datapath *dp, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct sw_flow_key *key, u32 recirc_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) const struct nlattr *actions, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) bool last, bool clone_flow_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) struct deferred_action *da;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) struct sw_flow_key *clone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (!skb) {
		/* Out of memory, skip this action. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
	/* When clone_flow_key is false, the 'key' will not be changed by the
	 * actions, so 'key' can be used directly.  Otherwise, try to clone the
	 * key from the next recursion level of 'flow_keys'.  If the clone
	 * succeeds, execute the actions without deferring.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) clone = clone_flow_key ? clone_key(key) : key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (clone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (actions) { /* Sample action */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (clone_flow_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) __this_cpu_inc(exec_actions_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) err = do_execute_actions(dp, skb, clone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) actions, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (clone_flow_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) __this_cpu_dec(exec_actions_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) } else { /* Recirc action */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) clone->recirc_id = recirc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) ovs_dp_process_packet(skb, clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
	/* Out of 'flow_keys' space.  Defer actions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) da = add_deferred_actions(skb, key, actions, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (da) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (!actions) { /* Recirc action */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) key = &da->pkt_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) key->recirc_id = recirc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) } else {
		/* Out of per-CPU action FIFO space.  Drop the 'skb' and
		 * log an error.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (net_ratelimit()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (actions) { /* Sample action */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) pr_warn("%s: deferred action limit reached, drop sample action\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) ovs_dp_name(dp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) } else { /* Recirc action */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) pr_warn("%s: deferred action limit reached, drop recirc action\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) ovs_dp_name(dp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
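/* Drain the per-CPU FIFO of actions that had to be deferred because the
 * 'flow_keys' recursion levels were exhausted.
 */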
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static void process_deferred_actions(struct datapath *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct action_fifo *fifo = this_cpu_ptr(action_fifos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
	/* Do not touch the FIFO if there are no deferred actions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (action_fifo_is_empty(fifo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
	/* Finish executing all deferred actions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct deferred_action *da = action_fifo_get(fifo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct sk_buff *skb = da->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) struct sw_flow_key *key = &da->pkt_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) const struct nlattr *actions = da->actions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) int actions_len = da->actions_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (actions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) do_execute_actions(dp, skb, key, actions, actions_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) ovs_dp_process_packet(skb, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) } while (!action_fifo_is_empty(fifo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /* Reset FIFO for the next packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) action_fifo_init(fifo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
/* Execute a list of actions against 'skb'.  This is the top-level entry
 * point: it enforces the per-CPU recursion limit and drains any deferred
 * actions once the outermost invocation completes.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) const struct sw_flow_actions *acts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct sw_flow_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) int err, level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) level = __this_cpu_inc_return(exec_actions_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (unlikely(level > OVS_RECURSION_LIMIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) ovs_dp_name(dp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) err = -ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) OVS_CB(skb)->acts_origlen = acts->orig_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) err = do_execute_actions(dp, skb, key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) acts->actions, acts->actions_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (level == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) process_deferred_actions(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) __this_cpu_dec(exec_actions_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
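/* Allocate the per-CPU deferred-action FIFOs and flow-key scratch space. */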
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) int action_fifos_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) action_fifos = alloc_percpu(struct action_fifo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (!action_fifos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) flow_keys = alloc_percpu(struct action_flow_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (!flow_keys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) free_percpu(action_fifos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) void action_fifos_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) free_percpu(action_fifos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) free_percpu(flow_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }