Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};
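/* The flexible data[] member holds a private copy of the caller's
 * attr/obj (memcpy'd in switchdev_deferred_enqueue() below), so the
 * caller's copy does not need to outlive the queued op.
 */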

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in the deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
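/*
 * Illustrative usage sketch (not part of this file): callers such as the
 * bridge flush pending deferred ops under rtnl_lock, e.g. before tearing
 * a port down:
 *
 *	rtnl_lock();
 *	switchdev_deferred_process();
 *	rtnl_unlock();
 */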

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

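	/* Deferral exists so that this can be reached from atomic context;
	 * hence the non-sleeping GFP_ATOMIC allocation.
	 */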
	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct switchdev_trans *trans)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.trans = trans,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, NULL);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	if (err)
		return err;

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);

	return err;
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	the system is not left in a partially updated state due to
 *	a failure from the driver/device.
 *
 *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be
 *	held and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
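/*
 * Illustrative usage sketch (not part of this file), modelled on how the
 * bridge sets the FDB ageing time; SWITCHDEV_F_DEFER lets the request be
 * issued from atomic context. The function name is hypothetical:
 */
#if 0	/* example only */
static int example_set_ageing_time(struct net_device *dev, unsigned long t)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
		.flags = SWITCHDEV_F_DEFER,
		.u.ageing_time = jiffies_to_clock_t(t),
	};

	/* Queued on the deferred list; the actual notify runs later
	 * from deferred_process_work under rtnl_lock.
	 */
	return switchdev_port_attr_set(dev, &attr);
}
#endif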

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct switchdev_trans *trans,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.trans = trans,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	if (err)
		return err;

	/* Phase II: commit obj add.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);

	return err;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_add_now(dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	the system is not left in a partially updated state due to
 *	a failure from the driver/device.
 *
 *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be
 *	held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_add_now(dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
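/*
 * Illustrative usage sketch (not part of this file): adding an untagged
 * PVID VLAN object to a port, as a bridge-like caller might. The function
 * name is hypothetical; the object layout matches this kernel's
 * struct switchdev_obj_port_vlan (vid_begin/vid_end range):
 */
#if 0	/* example only */
static int example_add_pvid_vlan(struct net_device *dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
		.vid_begin = vid,
		.vid_end = vid,
	};

	return switchdev_port_obj_add(dev, &vlan.obj, extack);
}
#endif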

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be
 *	held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
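/*
 * Deletion mirrors addition: pass the same object description that was
 * used for the add, e.g. switchdev_port_obj_del(dev, &vlan.obj) for the
 * VLAN sketch above. Note that deletion is notified in a single step
 * (trans == NULL), with no prepare/commit transaction.
 */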

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *
 *	Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
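/*
 * Illustrative usage sketch (not part of this file): a driver registers
 * on the atomic chain to learn about FDB entries pushed towards the
 * device. The handler and work-queueing helper are hypothetical:
 */
#if 0	/* example only */
static int example_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Atomic context: queue the FDB update to a work item
		 * instead of programming the hardware here.
		 */
		return example_queue_fdb_work(dev, event, ptr);
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_switchdev_nb = {
	.notifier_call = example_switchdev_event,
};

/* Driver init: register_switchdev_notifier(&example_switchdev_nb); */
#endif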

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct switchdev_trans *trans,
				      struct netlink_ext_ack *extack))
{
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

	if (check_cb(dev)) {
		err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
			     extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them.
	 * But propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct switchdev_trans *trans,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
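/*
 * Illustrative usage sketch (not part of this file): a driver's blocking
 * notifier dispatches object add/del events through these helpers so that
 * ports stacked under e.g. a LAG are walked for it. The callback names
 * are hypothetical:
 */
#if 0	/* example only */
static int example_blocking_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    example_port_dev_check,
						    example_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    example_port_dev_check,
						    example_port_obj_del);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}
#endif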

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them.
	 * But propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct switchdev_trans *trans))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them.
	 * But propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct switchdev_trans *trans))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
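/*
 * Illustrative usage sketch (not part of this file): a set_cb passed to
 * switchdev_handle_port_attr_set() honours the prepare/commit transaction
 * by checking the phase. The hardware helper is hypothetical:
 */
#if 0	/* example only */
static int example_port_attr_set(struct net_device *dev,
				 const struct switchdev_attr *attr,
				 struct switchdev_trans *trans)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (switchdev_trans_ph_prepare(trans))
			return 0;	/* nothing to reserve for this attr */
		return example_hw_set_stp_state(dev, attr->u.stp_state);
	default:
		return -EOPNOTSUPP;	/* lets another driver handle it */
	}
}
#endif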