Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

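The listing below appears to be net/openvswitch/vport.c, the Open vSwitch virtual-port (vport) management layer, as shipped in this 5.10.110 tree.
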
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>
#include <linux/module.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"

static LIST_HEAD(vport_ops_list);

/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

/**
 *	ovs_vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem.
 */
int ovs_vport_init(void)
{
	dev_table = kcalloc(VPORT_HASH_BUCKETS, sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table)
		return -ENOMEM;

	return 0;
}

/**
 *	ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 */
void ovs_vport_exit(void)
{
	kfree(dev_table);
}

static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
	unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}

int __ovs_vport_ops_register(struct vport_ops *ops)
{
	int err = -EEXIST;
	struct vport_ops *o;

	ovs_lock();
	list_for_each_entry(o, &vport_ops_list, list)
		if (ops->type == o->type)
			goto errout;

	list_add_tail(&ops->list, &vport_ops_list);
	err = 0;
errout:
	ovs_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);

void ovs_vport_ops_unregister(struct vport_ops *ops)
{
	ovs_lock();
	list_del(&ops->list);
	ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
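
/*
 * Example (illustrative sketch, not part of this file): a vport module
 * normally registers its ops from module init through the
 * ovs_vport_ops_register() wrapper in vport.h, which fills in
 * ops->owner = THIS_MODULE before calling __ovs_vport_ops_register().
 * The foo_* names below are placeholders; the other identifiers are the
 * ones the in-tree tunnel vports use:
 *
 *	static struct vport_ops ovs_foo_vport_ops = {
 *		.type		= OVS_VPORT_TYPE_GENEVE,
 *		.create		= foo_create,
 *		.destroy	= ovs_netdev_tunnel_destroy,
 *		.send		= dev_queue_xmit,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return ovs_vport_ops_register(&ovs_foo_vport_ops);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		ovs_vport_ops_unregister(&ovs_foo_vport_ops);
 *	}
 */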

/**
 *	ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace
 * @name: name of port to find
 *
 * Must be called with ovs or RCU read lock.
 */
struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct vport *vport;

	hlist_for_each_entry_rcu(vport, bucket, hash_node,
				 lockdep_ovsl_is_held())
		if (!strcmp(name, ovs_vport_name(vport)) &&
		    net_eq(ovs_dp_get_net(vport->dp), net))
			return vport;

	return NULL;
}

/**
 *	ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: Information about new vport.
 *
 * Allocate and initialize a new vport defined by @ops.  The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv().  vports that are no longer needed should be released with
 * ovs_vport_free().
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->ops = ops;
	INIT_HLIST_NODE(&vport->dp_hash_node);

	if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
		kfree(vport);
		return ERR_PTR(-EINVAL);
	}

	return vport;
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);
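
/*
 * Example (illustrative sketch, not part of this file): a ->create()
 * implementation typically wraps ovs_vport_alloc(), reserving priv_size
 * bytes of private data that it later retrieves with vport_priv() from
 * vport.h.  struct foo_port and foo_vport_ops are placeholder names:
 *
 *	struct foo_port {
 *		__be16 dst_port;
 *	};
 *
 *	static struct vport *foo_create(const struct vport_parms *parms)
 *	{
 *		struct vport *vport;
 *		struct foo_port *foo;
 *
 *		vport = ovs_vport_alloc(sizeof(struct foo_port),
 *					&foo_vport_ops, parms);
 *		if (IS_ERR(vport))
 *			return vport;
 *
 *		foo = vport_priv(vport);
 *		foo->dst_port = htons(6081);
 *		return vport;
 *	}
 */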

/**
 *	ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with ovs_vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from RCU callback or error path; therefore
	 * it is safe to use raw dereference.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);

static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
{
	struct vport_ops *ops;

	list_for_each_entry(ops, &vport_ops_list, list)
		if (ops->type == parms->type)
			return ops;

	return NULL;
}

/**
 *	ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type).  ovs_mutex must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport_ops *ops;
	struct vport *vport;

	ops = ovs_vport_lookup(parms);
	if (ops) {
		struct hlist_head *bucket;

		if (!try_module_get(ops->owner))
			return ERR_PTR(-EAFNOSUPPORT);

		vport = ops->create(parms);
		if (IS_ERR(vport)) {
			module_put(ops->owner);
			return vport;
		}

		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
				     ovs_vport_name(vport));
		hlist_add_head_rcu(&vport->hash_node, bucket);
		return vport;
	}

	/* Unlock to attempt module load and return -EAGAIN if load
	 * was successful as we need to restart the port addition
	 * workflow.
	 */
	ovs_unlock();
	request_module("vport-type-%d", parms->type);
	ovs_lock();

	if (!ovs_vport_lookup(parms))
		return ERR_PTR(-EAFNOSUPPORT);
	else
		return ERR_PTR(-EAGAIN);
}
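
/*
 * Example (illustrative sketch, not part of this file): the -EAGAIN
 * contract above means a kernel caller is expected to retry the whole
 * port-addition step after a vport module was loaded on demand, roughly:
 *
 *	struct vport *vport;
 *
 *	do {
 *		vport = ovs_vport_add(parms);
 *	} while (IS_ERR(vport) && PTR_ERR(vport) == -EAGAIN);
 *
 *	if (IS_ERR(vport))
 *		return PTR_ERR(vport);
 *
 * (The in-tree netlink handler in datapath.c does the equivalent with a
 * restart label rather than a loop.)
 */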

/**
 *	ovs_vport_set_options - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @options: New configuration.
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type).  ovs_mutex must be held.
 */
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
	if (!vport->ops->set_options)
		return -EOPNOTSUPP;
	return vport->ops->set_options(vport, options);
}

/**
 *	ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it.  ovs_mutex must
 * be held.
 */
void ovs_vport_del(struct vport *vport)
{
	hlist_del_rcu(&vport->hash_node);
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}

/**
 *	ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	const struct rtnl_link_stats64 *dev_stats;
	struct rtnl_link_stats64 temp;

	dev_stats = dev_get_stats(vport->dev, &temp);
	stats->rx_errors  = dev_stats->rx_errors;
	stats->tx_errors  = dev_stats->tx_errors;
	stats->tx_dropped = dev_stats->tx_dropped;
	stats->rx_dropped = dev_stats->rx_dropped;

	stats->rx_bytes	  = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_packets;
	stats->tx_bytes	  = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_packets;
}

/**
 *	ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options.
 * @skb: sk_buff where options should be appended.
 *
 * Retrieves the configuration of the given device, appending an
 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
 * vport-specific attributes to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
 * negative error code if a real error occurred.  If an error occurs, @skb is
 * left unmodified.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;
	int err;

	if (!vport->ops->get_options)
		return 0;

	nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_OPTIONS);
	if (!nla)
		return -EMSGSIZE;

	err = vport->ops->get_options(vport, skb);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}

	nla_nest_end(skb, nla);
	return 0;
}
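
/*
 * Example (illustrative sketch, not part of this file): because the
 * OVS_VPORT_ATTR_OPTIONS nest is opened and closed here, a vport's
 * ->get_options() callback only emits its own attributes.  A tunnel
 * vport with a configurable destination port might look like this
 * (foo_port is a placeholder; OVS_TUNNEL_ATTR_DST_PORT is the attribute
 * the in-tree tunnel vports use):
 *
 *	static int foo_get_options(const struct vport *vport,
 *				   struct sk_buff *skb)
 *	{
 *		struct foo_port *foo = vport_priv(vport);
 *
 *		if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT,
 *				ntohs(foo->dst_port)))
 *			return -EMSGSIZE;
 *		return 0;
 *	}
 */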

/**
 *	ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

/**
 *	ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
 *
 * @vport: vport from which to retrieve the portids.
 * @skb: sk_buff where portids should be appended.
 *
 * Retrieves the configuration of the given vport, appending the
 * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
 * portids to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
 * If an error occurs, @skb is left unmodified.  Must be called with
 * ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_upcall_portids(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;

	ids = rcu_dereference_ovsl(vport->upcall_portids);

	if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
		return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
			       ids->n_ids * sizeof(u32), (void *)ids->ids);
	else
		return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
}

/**
 *	ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
 *
 * @vport: vport from which the missed packet is received.
 * @skb: skb on which the missed packet was received.
 *
 * Uses skb_get_hash() to select the upcall portid to send the
 * upcall.
 *
 * Returns the portid of the target socket.  Must be called with rcu_read_lock.
 */
u32 ovs_vport_find_upcall_portid(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;
	u32 ids_index;
	u32 hash;

	ids = rcu_dereference(vport->upcall_portids);

	/* If there is only one portid, select it in the fast-path. */
	if (ids->n_ids == 1)
		return ids->ids[0];

	hash = skb_get_hash(skb);
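	/* ids_index is hash % n_ids computed without a division:
	 * reciprocal_divide(hash, ids->rn_ids) yields hash / n_ids (see
	 * linux/reciprocal_div.h), so subtracting n_ids * (hash / n_ids)
	 * from hash leaves the remainder, e.g. hash = 10, n_ids = 4:
	 * 10 - 4 * 2 = 2.
	 */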
	ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
	return ids->ids[ids_index];
}

/**
 *	ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel (if any) that carried packet
 *
 * Must be called with rcu_read_lock.  The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		      const struct ip_tunnel_info *tun_info)
{
	struct sw_flow_key key;
	int error;

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->mru = 0;
	OVS_CB(skb)->cutlen = 0;
	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
		u32 mark;

		mark = skb->mark;
		skb_scrub_packet(skb, true);
		skb->mark = mark;
		tun_info = NULL;
	}

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return error;
	}
	ovs_dp_process_packet(skb, &key);
	return 0;
}
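
/*
 * Example (illustrative sketch, not part of this file): a netdev-backed
 * vport feeds frames into ovs_vport_receive() from its rx_handler,
 * roughly as below (error and unregister handling omitted; foo_frame_hook
 * is a placeholder name, the helpers are the ones vport-netdev.c uses):
 *
 *	static rx_handler_result_t foo_frame_hook(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct vport *vport = ovs_netdev_get_vport(skb->dev);
 *
 *		skb = skb_share_check(skb, GFP_ATOMIC);
 *		if (!skb)
 *			return RX_HANDLER_CONSUMED;
 *
 *		skb_push(skb, ETH_HLEN);
 *		skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
 *		ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
 *		return RX_HANDLER_CONSUMED;
 *	}
 */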

static int packet_length(const struct sk_buff *skb,
			 struct net_device *dev)
{
	int length = skb->len - dev->hard_header_len;

	if (!skb_vlan_tag_present(skb) &&
	    eth_type_vlan(skb->protocol))
		length -= VLAN_HLEN;

	/* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
	 * (ETH_HLEN + VLAN_HLEN) in addition to the mtu value, but almost none
	 * account for 802.1ad. e.g. is_skb_forwardable().
	 */

	return length > 0 ? length : 0;
}

void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
{
	int mtu = vport->dev->mtu;

	switch (vport->dev->type) {
	case ARPHRD_NONE:
		if (mac_proto == MAC_PROTO_ETHERNET) {
			skb_reset_network_header(skb);
			skb_reset_mac_len(skb);
			skb->protocol = htons(ETH_P_TEB);
		} else if (mac_proto != MAC_PROTO_NONE) {
			WARN_ON_ONCE(1);
			goto drop;
		}
		break;
	case ARPHRD_ETHER:
		if (mac_proto != MAC_PROTO_ETHERNET)
			goto drop;
		break;
	default:
		goto drop;
	}

	if (unlikely(packet_length(skb, vport->dev) > mtu &&
		     !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     vport->dev->name,
				     packet_length(skb, vport->dev), mtu);
		vport->dev->stats.tx_errors++;
		goto drop;
	}

	skb->dev = vport->dev;
	skb->tstamp = 0;
	vport->ops->send(skb);
	return;

drop:
	kfree_skb(skb);
}