// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* This provides a net_failover interface for paravirtual drivers to
 * provide an alternate datapath by exporting APIs to create and
 * destroy an upper 'net_failover' netdev. The upper dev manages the
 * original paravirtual interface as a 'standby' netdev and uses the
 * generic failover infrastructure to register and manage a direct
 * attached VF as a 'primary' netdev. This enables live migration of
 * a VM with a direct attached VF by failing over to the paravirtual
 * datapath when the VF is unplugged.
 *
 * Some of the netdev management routines are based on the bond/team
 * drivers, as this driver provides similar active-backup functionality.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netpoll.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <net/sch_generic.h>
#include <uapi/linux/if_arp.h>
#include <net/net_failover.h>

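/* A slave netdev is ready to transmit if it is up and has carrier */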
static bool net_failover_xmit_ready(struct net_device *dev)
{
	return netif_running(dev) && netif_carrier_ok(dev);
}

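/* Open both slave netdevs (primary and standby, when present) and bring
 * the failover dev's carrier up if either slave is ready to transmit.
 */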
static int net_failover_open(struct net_device *dev)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *primary_dev, *standby_dev;
	int err;

	primary_dev = rtnl_dereference(nfo_info->primary_dev);
	if (primary_dev) {
		err = dev_open(primary_dev, NULL);
		if (err)
			goto err_primary_open;
	}

	standby_dev = rtnl_dereference(nfo_info->standby_dev);
	if (standby_dev) {
		err = dev_open(standby_dev, NULL);
		if (err)
			goto err_standby_open;
	}

	if ((primary_dev && net_failover_xmit_ready(primary_dev)) ||
	    (standby_dev && net_failover_xmit_ready(standby_dev))) {
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	}

	return 0;

err_standby_open:
	if (primary_dev)
		dev_close(primary_dev);
err_primary_open:
	netif_tx_disable(dev);
	return err;
}

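/* Stop transmission on the failover dev and close both slave netdevs */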
static int net_failover_close(struct net_device *dev)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *slave_dev;

	netif_tx_disable(dev);

	slave_dev = rtnl_dereference(nfo_info->primary_dev);
	if (slave_dev)
		dev_close(slave_dev);

	slave_dev = rtnl_dereference(nfo_info->standby_dev);
	if (slave_dev)
		dev_close(slave_dev);

	return 0;
}

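/* Account and free an skb that cannot be transmitted via either slave */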
static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	atomic_long_inc(&dev->tx_dropped);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

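/* Transmit via the primary slave if it is ready, otherwise fall back to
 * the standby slave; drop the skb if neither is usable.
 */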
static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *xmit_dev;

	/* Try xmit via primary netdev followed by standby netdev */
	xmit_dev = rcu_dereference_bh(nfo_info->primary_dev);
	if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) {
		xmit_dev = rcu_dereference_bh(nfo_info->standby_dev);
		if (!xmit_dev || !net_failover_xmit_ready(xmit_dev))
			return net_failover_drop_xmit(skb, dev);
	}

	skb->dev = xmit_dev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	return dev_queue_xmit(skb);
}

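/* Delegate tx queue selection to the primary slave's ndo_select_queue()
 * when available. The original queue_mapping is saved so it can be
 * restored before the skb is handed to the slave.
 */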
static u16 net_failover_select_queue(struct net_device *dev,
				     struct sk_buff *skb,
				     struct net_device *sb_dev)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *primary_dev;
	u16 txq;

	primary_dev = rcu_dereference(nfo_info->primary_dev);
	if (primary_dev) {
		const struct net_device_ops *ops = primary_dev->netdev_ops;

		if (ops->ndo_select_queue)
			txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
		else
			txq = netdev_pick_tx(primary_dev, skb, NULL);

		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

		return txq;
	}

	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/* Save the original txq to restore before passing to the driver */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}

	return txq;
}

/* Fold stats, assuming all rtnl_link_stats64 fields are u64, but
 * some drivers may provide only 32bit values.
 */
static void net_failover_fold_stats(struct rtnl_link_stats64 *_res,
				    const struct rtnl_link_stats64 *_new,
				    const struct rtnl_link_stats64 *_old)
{
	const u64 *new = (const u64 *)_new;
	const u64 *old = (const u64 *)_old;
	u64 *res = (u64 *)_res;
	int i;

	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
		u64 nv = new[i];
		u64 ov = old[i];
		s64 delta = nv - ov;

		/* detects if this particular field is 32bit only */
		if (((nv | ov) >> 32) == 0)
			delta = (s64)(s32)((u32)nv - (u32)ov);

		/* filter anomalies, some drivers reset their stats
		 * at down/up events.
		 */
		if (delta > 0)
			res[i] += delta;
	}
}

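/* Accumulate the stats of both slaves into the failover dev's stats,
 * serialized by stats_lock.
 */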
static void net_failover_get_stats(struct net_device *dev,
				   struct rtnl_link_stats64 *stats)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	const struct rtnl_link_stats64 *new;
	struct rtnl_link_stats64 temp;
	struct net_device *slave_dev;

	spin_lock(&nfo_info->stats_lock);
	memcpy(stats, &nfo_info->failover_stats, sizeof(*stats));

	rcu_read_lock();

	slave_dev = rcu_dereference(nfo_info->primary_dev);
	if (slave_dev) {
		new = dev_get_stats(slave_dev, &temp);
		net_failover_fold_stats(stats, new, &nfo_info->primary_stats);
		memcpy(&nfo_info->primary_stats, new, sizeof(*new));
	}

	slave_dev = rcu_dereference(nfo_info->standby_dev);
	if (slave_dev) {
		new = dev_get_stats(slave_dev, &temp);
		net_failover_fold_stats(stats, new, &nfo_info->standby_stats);
		memcpy(&nfo_info->standby_stats, new, sizeof(*new));
	}

	rcu_read_unlock();

	memcpy(&nfo_info->failover_stats, stats, sizeof(*stats));
	spin_unlock(&nfo_info->stats_lock);
}

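/* Propagate an MTU change to both slaves; if the standby slave rejects
 * the new MTU, the primary slave is rolled back to the old value.
 */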
static int net_failover_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *primary_dev, *standby_dev;
	int ret = 0;

	primary_dev = rtnl_dereference(nfo_info->primary_dev);
	if (primary_dev) {
		ret = dev_set_mtu(primary_dev, new_mtu);
		if (ret)
			return ret;
	}

	standby_dev = rtnl_dereference(nfo_info->standby_dev);
	if (standby_dev) {
		ret = dev_set_mtu(standby_dev, new_mtu);
		if (ret) {
			if (primary_dev)
				dev_set_mtu(primary_dev, dev->mtu);
			return ret;
		}
	}

	dev->mtu = new_mtu;

	return 0;
}

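/* Sync the failover dev's unicast and multicast address lists to both
 * slaves.
 */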
static void net_failover_set_rx_mode(struct net_device *dev)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *slave_dev;

	rcu_read_lock();

	slave_dev = rcu_dereference(nfo_info->primary_dev);
	if (slave_dev) {
		dev_uc_sync_multiple(slave_dev, dev);
		dev_mc_sync_multiple(slave_dev, dev);
	}

	slave_dev = rcu_dereference(nfo_info->standby_dev);
	if (slave_dev) {
		dev_uc_sync_multiple(slave_dev, dev);
		dev_mc_sync_multiple(slave_dev, dev);
	}

	rcu_read_unlock();
}

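/* Add a VLAN ID on both slaves; if the standby slave fails, the VID is
 * removed from the primary slave again.
 */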
static int net_failover_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
					u16 vid)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *primary_dev, *standby_dev;
	int ret = 0;

	primary_dev = rcu_dereference(nfo_info->primary_dev);
	if (primary_dev) {
		ret = vlan_vid_add(primary_dev, proto, vid);
		if (ret)
			return ret;
	}

	standby_dev = rcu_dereference(nfo_info->standby_dev);
	if (standby_dev) {
		ret = vlan_vid_add(standby_dev, proto, vid);
		if (ret)
			if (primary_dev)
				vlan_vid_del(primary_dev, proto, vid);
	}

	return ret;
}

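/* Delete a VLAN ID from both slaves */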
static int net_failover_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
					 u16 vid)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *slave_dev;

	slave_dev = rcu_dereference(nfo_info->primary_dev);
	if (slave_dev)
		vlan_vid_del(slave_dev, proto, vid);

	slave_dev = rcu_dereference(nfo_info->standby_dev);
	if (slave_dev)
		vlan_vid_del(slave_dev, proto, vid);

	return 0;
}

static const struct net_device_ops failover_dev_ops = {
	.ndo_open = net_failover_open,
	.ndo_stop = net_failover_close,
	.ndo_start_xmit = net_failover_start_xmit,
	.ndo_select_queue = net_failover_select_queue,
	.ndo_get_stats64 = net_failover_get_stats,
	.ndo_change_mtu = net_failover_change_mtu,
	.ndo_set_rx_mode = net_failover_set_rx_mode,
	.ndo_vlan_rx_add_vid = net_failover_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = net_failover_vlan_rx_kill_vid,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_features_check = passthru_features_check,
};

#define FAILOVER_NAME "net_failover"
#define FAILOVER_VERSION "0.1"

static void nfo_ethtool_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version));
}

static int nfo_ethtool_get_link_ksettings(struct net_device *dev,
					  struct ethtool_link_ksettings *cmd)
{
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *slave_dev;

	slave_dev = rtnl_dereference(nfo_info->primary_dev);
	if (!slave_dev || !net_failover_xmit_ready(slave_dev)) {
		slave_dev = rtnl_dereference(nfo_info->standby_dev);
		if (!slave_dev || !net_failover_xmit_ready(slave_dev)) {
			cmd->base.duplex = DUPLEX_UNKNOWN;
			cmd->base.port = PORT_OTHER;
			cmd->base.speed = SPEED_UNKNOWN;

			return 0;
		}
	}

	return __ethtool_get_link_ksettings(slave_dev, cmd);
}

static const struct ethtool_ops failover_ethtool_ops = {
	.get_drvinfo = nfo_ethtool_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = nfo_ethtool_get_link_ksettings,
};

/* Called when slave dev is injecting data into network stack.
 * Change the associated network device from lower dev to failover dev.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t net_failover_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *primary_dev, *standby_dev;

	primary_dev = rcu_dereference(nfo_info->primary_dev);
	standby_dev = rcu_dereference(nfo_info->standby_dev);

	if (primary_dev && skb->dev == standby_dev)
		return RX_HANDLER_EXACT;

	skb->dev = dev;

	return RX_HANDLER_ANOTHER;
}

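/* Recompute vlan_features, hw_enc_features, hard_header_len and the
 * XMIT_DST_RELEASE flags of the failover dev from both slaves.
 */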
static void net_failover_compute_features(struct net_device *dev)
{
	netdev_features_t vlan_features = FAILOVER_VLAN_FEATURES &
					  NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features = FAILOVER_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	struct net_failover_info *nfo_info = netdev_priv(dev);
	struct net_device *primary_dev, *standby_dev;

	primary_dev = rcu_dereference(nfo_info->primary_dev);
	if (primary_dev) {
		vlan_features =
			netdev_increment_features(vlan_features,
						  primary_dev->vlan_features,
						  FAILOVER_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  primary_dev->hw_enc_features,
						  FAILOVER_ENC_FEATURES);

		dst_release_flag &= primary_dev->priv_flags;
		if (primary_dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = primary_dev->hard_header_len;
	}

	standby_dev = rcu_dereference(nfo_info->standby_dev);
	if (standby_dev) {
		vlan_features =
			netdev_increment_features(vlan_features,
						  standby_dev->vlan_features,
						  FAILOVER_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  standby_dev->hw_enc_features,
						  FAILOVER_ENC_FEATURES);

		dst_release_flag &= standby_dev->priv_flags;
		if (standby_dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = standby_dev->hard_header_len;
	}

	dev->vlan_features = vlan_features;
	dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
	dev->hard_header_len = max_hard_header_len;

	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE |
				 IFF_XMIT_DST_RELEASE_PERM))
		dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(dev);
}

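/* Report a slave's link and tx state to the failover infrastructure via
 * netdev_lower_state_changed(). tx is enabled on the primary slave
 * whenever it is running, and on the standby slave only while it is
 * running and no running primary is present.
 */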
static void net_failover_lower_state_changed(struct net_device *slave_dev,
					     struct net_device *primary_dev,
					     struct net_device *standby_dev)
{
	struct netdev_lag_lower_state_info info;

	if (netif_carrier_ok(slave_dev))
		info.link_up = true;
	else
		info.link_up = false;

	if (slave_dev == primary_dev) {
		if (netif_running(primary_dev))
			info.tx_enabled = true;
		else
			info.tx_enabled = false;
	} else {
		if ((primary_dev && netif_running(primary_dev)) ||
		    (!netif_running(standby_dev)))
			info.tx_enabled = false;
		else
			info.tx_enabled = true;
	}

	netdev_lower_state_changed(slave_dev, &info);
}

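/* Validate a slave before registration: the slot (primary or standby)
 * must be free, a primary slave must be a PCI device (assumed to be a
 * VF), and the failover dev must not be VLAN challenged while VLANs are
 * configured on it.
 */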
static int net_failover_slave_pre_register(struct net_device *slave_dev,
					   struct net_device *failover_dev)
{
	struct net_device *standby_dev, *primary_dev;
	struct net_failover_info *nfo_info;
	bool slave_is_standby;

	nfo_info = netdev_priv(failover_dev);
	standby_dev = rtnl_dereference(nfo_info->standby_dev);
	primary_dev = rtnl_dereference(nfo_info->primary_dev);
	slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent;
	if (slave_is_standby ? standby_dev : primary_dev) {
		netdev_err(failover_dev, "%s attempting to register as slave dev when %s already present\n",
			   slave_dev->name,
			   slave_is_standby ? "standby" : "primary");
		return -EINVAL;
	}

	/* We want to allow only a direct attached VF device as a primary
	 * netdev. As there is no easy way to check for a VF device, restrict
	 * this to a pci device.
	 */
	if (!slave_is_standby && (!slave_dev->dev.parent ||
				  !dev_is_pci(slave_dev->dev.parent)))
		return -EINVAL;

	if (failover_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(failover_dev)) {
		netdev_err(failover_dev, "Device %s is VLAN challenged and failover device has VLAN set up\n",
			   failover_dev->name);
		return -EINVAL;
	}

	return 0;
}

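/* Register a slave: align its MTU with the failover dev, open it if the
 * failover dev is running, sync address lists and VLAN IDs, and install
 * it as the primary or standby slave based on its parent device.
 */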
static int net_failover_slave_register(struct net_device *slave_dev,
				       struct net_device *failover_dev)
{
	struct net_device *standby_dev, *primary_dev;
	struct net_failover_info *nfo_info;
	bool slave_is_standby;
	u32 orig_mtu;
	int err;

	/* Align MTU of slave with failover dev */
	orig_mtu = slave_dev->mtu;
	err = dev_set_mtu(slave_dev, failover_dev->mtu);
	if (err) {
		netdev_err(failover_dev, "unable to change mtu of %s to %u, register failed\n",
			   slave_dev->name, failover_dev->mtu);
		goto done;
	}

	dev_hold(slave_dev);

	if (netif_running(failover_dev)) {
		err = dev_open(slave_dev, NULL);
		if (err && (err != -EBUSY)) {
			netdev_err(failover_dev, "Opening slave %s failed err:%d\n",
				   slave_dev->name, err);
			goto err_dev_open;
		}
	}

	netif_addr_lock_bh(failover_dev);
	dev_uc_sync_multiple(slave_dev, failover_dev);
	dev_mc_sync_multiple(slave_dev, failover_dev);
	netif_addr_unlock_bh(failover_dev);

	err = vlan_vids_add_by_dev(slave_dev, failover_dev);
	if (err) {
		netdev_err(failover_dev, "Failed to add vlan ids to device %s err:%d\n",
			   slave_dev->name, err);
		goto err_vlan_add;
	}

	nfo_info = netdev_priv(failover_dev);
	standby_dev = rtnl_dereference(nfo_info->standby_dev);
	primary_dev = rtnl_dereference(nfo_info->primary_dev);
	slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent;

	if (slave_is_standby) {
		rcu_assign_pointer(nfo_info->standby_dev, slave_dev);
		standby_dev = slave_dev;
		dev_get_stats(standby_dev, &nfo_info->standby_stats);
	} else {
		rcu_assign_pointer(nfo_info->primary_dev, slave_dev);
		primary_dev = slave_dev;
		dev_get_stats(primary_dev, &nfo_info->primary_stats);
		failover_dev->min_mtu = slave_dev->min_mtu;
		failover_dev->max_mtu = slave_dev->max_mtu;
	}

	net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev);
	net_failover_compute_features(failover_dev);

	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

	netdev_info(failover_dev, "failover %s slave:%s registered\n",
		    slave_is_standby ? "standby" : "primary", slave_dev->name);

	return 0;

err_vlan_add:
	dev_uc_unsync(slave_dev, failover_dev);
	dev_mc_unsync(slave_dev, failover_dev);
	dev_close(slave_dev);
err_dev_open:
	dev_put(slave_dev);
	dev_set_mtu(slave_dev, orig_mtu);
done:
	return err;
}

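/* A slave may be unregistered only if it is the current primary or
 * standby dev.
 */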
static int net_failover_slave_pre_unregister(struct net_device *slave_dev,
					     struct net_device *failover_dev)
{
	struct net_device *standby_dev, *primary_dev;
	struct net_failover_info *nfo_info;

	nfo_info = netdev_priv(failover_dev);
	primary_dev = rtnl_dereference(nfo_info->primary_dev);
	standby_dev = rtnl_dereference(nfo_info->standby_dev);

	if (slave_dev != primary_dev && slave_dev != standby_dev)
		return -ENODEV;

	return 0;
}

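/* Unregister a slave: remove its VLAN IDs and synced addresses, close
 * it, clear the primary/standby pointer, and restore the failover dev's
 * MTU range from the standby slave when the primary goes away.
 */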
static int net_failover_slave_unregister(struct net_device *slave_dev,
					 struct net_device *failover_dev)
{
	struct net_device *standby_dev, *primary_dev;
	struct net_failover_info *nfo_info;
	bool slave_is_standby;

	nfo_info = netdev_priv(failover_dev);
	primary_dev = rtnl_dereference(nfo_info->primary_dev);
	standby_dev = rtnl_dereference(nfo_info->standby_dev);

	if (WARN_ON_ONCE(slave_dev != primary_dev && slave_dev != standby_dev))
		return -ENODEV;

	vlan_vids_del_by_dev(slave_dev, failover_dev);
	dev_uc_unsync(slave_dev, failover_dev);
	dev_mc_unsync(slave_dev, failover_dev);
	dev_close(slave_dev);

	nfo_info = netdev_priv(failover_dev);
	dev_get_stats(failover_dev, &nfo_info->failover_stats);

	slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent;
	if (slave_is_standby) {
		RCU_INIT_POINTER(nfo_info->standby_dev, NULL);
	} else {
		RCU_INIT_POINTER(nfo_info->primary_dev, NULL);
		if (standby_dev) {
			failover_dev->min_mtu = standby_dev->min_mtu;
			failover_dev->max_mtu = standby_dev->max_mtu;
		}
	}

	dev_put(slave_dev);

	net_failover_compute_features(failover_dev);

	netdev_info(failover_dev, "failover %s slave:%s unregistered\n",
		    slave_is_standby ? "standby" : "primary", slave_dev->name);

	return 0;
}

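/* Update the failover dev's carrier and tx queue state when a slave's
 * link changes: carrier stays on while at least one slave is ready to
 * transmit.
 */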
static int net_failover_slave_link_change(struct net_device *slave_dev,
					  struct net_device *failover_dev)
{
	struct net_device *primary_dev, *standby_dev;
	struct net_failover_info *nfo_info;

	nfo_info = netdev_priv(failover_dev);

	primary_dev = rtnl_dereference(nfo_info->primary_dev);
	standby_dev = rtnl_dereference(nfo_info->standby_dev);

	if (slave_dev != primary_dev && slave_dev != standby_dev)
		return -ENODEV;

	if ((primary_dev && net_failover_xmit_ready(primary_dev)) ||
	    (standby_dev && net_failover_xmit_ready(standby_dev))) {
		netif_carrier_on(failover_dev);
		netif_tx_wake_all_queues(failover_dev);
	} else {
		dev_get_stats(failover_dev, &nfo_info->failover_stats);
		netif_carrier_off(failover_dev);
		netif_tx_stop_all_queues(failover_dev);
	}

	net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev);

	return 0;
}

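/* Called when a slave netdev is renamed, e.g. by udev */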
static int net_failover_slave_name_change(struct net_device *slave_dev,
					  struct net_device *failover_dev)
{
	struct net_device *primary_dev, *standby_dev;
	struct net_failover_info *nfo_info;

	nfo_info = netdev_priv(failover_dev);

	primary_dev = rtnl_dereference(nfo_info->primary_dev);
	standby_dev = rtnl_dereference(nfo_info->standby_dev);

	if (slave_dev != primary_dev && slave_dev != standby_dev)
		return -ENODEV;

	/* We need to bring up the slave after the rename by udev in case
	 * open failed with EBUSY when it was registered.
	 */
	dev_open(slave_dev, NULL);

	return 0;
}

static struct failover_ops net_failover_ops = {
	.slave_pre_register = net_failover_slave_pre_register,
	.slave_register = net_failover_slave_register,
	.slave_pre_unregister = net_failover_slave_pre_unregister,
	.slave_unregister = net_failover_slave_unregister,
	.slave_link_change = net_failover_slave_link_change,
	.slave_name_change = net_failover_slave_name_change,
	.slave_handle_frame = net_failover_handle_frame,
};

/**
 * net_failover_create - Create and register a failover instance
 *
 * @standby_dev: standby netdev
 *
 * Creates a failover netdev and registers a failover instance for a standby
 * netdev. Used by paravirtual drivers that use the 3-netdev model.
 * The failover netdev acts as a master device and controls 2 slave devices -
 * the original standby netdev and a VF netdev with the same MAC that gets
 * registered as the primary netdev.
 *
 * Return: pointer to failover instance
 */
struct failover *net_failover_create(struct net_device *standby_dev)
{
	struct device *dev = standby_dev->dev.parent;
	struct net_device *failover_dev;
	struct failover *failover;
	int err;

	/* Alloc at least 2 queues; for now we go with 16, assuming that
	 * enslaved VF devices won't have too many queues.
	 */
	failover_dev = alloc_etherdev_mq(sizeof(struct net_failover_info), 16);
	if (!failover_dev) {
		dev_err(dev, "Unable to allocate failover_netdev!\n");
		return ERR_PTR(-ENOMEM);
	}

	dev_net_set(failover_dev, dev_net(standby_dev));
	SET_NETDEV_DEV(failover_dev, dev);

	failover_dev->netdev_ops = &failover_dev_ops;
	failover_dev->ethtool_ops = &failover_ethtool_ops;

	/* Initialize the device options */
	failover_dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
	failover_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE |
				      IFF_TX_SKB_SHARING);

	/* don't acquire failover netdev's netif_tx_lock when transmitting */
	failover_dev->features |= NETIF_F_LLTX;

	/* Don't allow failover devices to change network namespaces. */
	failover_dev->features |= NETIF_F_NETNS_LOCAL;

	failover_dev->hw_features = FAILOVER_VLAN_FEATURES |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX |
				    NETIF_F_HW_VLAN_CTAG_FILTER;

	failover_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	failover_dev->features |= failover_dev->hw_features;

	memcpy(failover_dev->dev_addr, standby_dev->dev_addr,
	       failover_dev->addr_len);

	failover_dev->min_mtu = standby_dev->min_mtu;
	failover_dev->max_mtu = standby_dev->max_mtu;

	err = register_netdev(failover_dev);
	if (err) {
		dev_err(dev, "Unable to register failover_dev!\n");
		goto err_register_netdev;
	}

	netif_carrier_off(failover_dev);

	failover = failover_register(failover_dev, &net_failover_ops);
	if (IS_ERR(failover)) {
		err = PTR_ERR(failover);
		goto err_failover_register;
	}

	return failover;

err_failover_register:
	unregister_netdev(failover_dev);
err_register_netdev:
	free_netdev(failover_dev);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(net_failover_create);

/**
 * net_failover_destroy - Destroy a failover instance
 *
 * @failover: pointer to failover instance
 *
 * Unregisters any slave netdevs associated with the failover instance by
 * calling failover_slave_unregister(), unregisters the failover instance
 * itself, and finally frees the failover netdev. Used by paravirtual
 * drivers that use the 3-netdev model.
 */
void net_failover_destroy(struct failover *failover)
{
	struct net_failover_info *nfo_info;
	struct net_device *failover_dev;
	struct net_device *slave_dev;

	if (!failover)
		return;

	failover_dev = rcu_dereference(failover->failover_dev);
	nfo_info = netdev_priv(failover_dev);

	netif_device_detach(failover_dev);

	rtnl_lock();

	slave_dev = rtnl_dereference(nfo_info->primary_dev);
	if (slave_dev)
		failover_slave_unregister(slave_dev);

	slave_dev = rtnl_dereference(nfo_info->standby_dev);
	if (slave_dev)
		failover_slave_unregister(slave_dev);

	failover_unregister(failover);

	unregister_netdevice(failover_dev);

	rtnl_unlock();

	free_netdev(failover_dev);
}
EXPORT_SYMBOL_GPL(net_failover_destroy);

static __init int
net_failover_init(void)
{
	return 0;
}
module_init(net_failover_init);

static __exit
void net_failover_exit(void)
{
}
module_exit(net_failover_exit);

MODULE_DESCRIPTION("Failover driver for Paravirtual drivers");
MODULE_LICENSE("GPL v2");