// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pkt_sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_bonding.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <net/ipx.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <asm/byteorder.h>
#include <net/bonding.h>
#include <net/bond_alb.h>

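/* Ethernet address of the IPv6 all-nodes multicast group (ff02::1) */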
static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
	0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;

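/* On-wire layouts used below, packed so their sizes match the wire format:
 * the TLB/ALB learning frame and the Ethernet/IPv4 ARP packet handled by RLB.
 */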
#pragma pack(1)
struct learning_pkt {
	u8 mac_dst[ETH_ALEN];
	u8 mac_src[ETH_ALEN];
	__be16 type;
	u8 padding[ETH_ZLEN - ETH_HLEN];
};

struct arp_pkt {
	__be16 hw_addr_space;
	__be16 prot_addr_space;
	u8 hw_addr_len;
	u8 prot_addr_len;
	__be16 op_code;
	u8 mac_src[ETH_ALEN];	/* sender hardware address */
	__be32 ip_src;		/* sender IP address */
	u8 mac_dst[ETH_ALEN];	/* target hardware address */
	__be32 ip_dst;		/* target IP address */
};
#pragma pack()

/* Forward declarations */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
				      bool strict_match);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
			 u32 ip_dst_hash);

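/* XOR all bytes in [hash_start, hash_start + hash_size) into a single byte.
 * The result is used directly as an index into the TLB/RLB hash tables;
 * e.g. the IPv4 address 192.168.1.10 hashes to 0xc0 ^ 0xa8 ^ 0x01 ^ 0x0a = 0x63.
 */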
static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
{
	int i;
	u8 hash = 0;

	for (i = 0; i < hash_size; i++)
		hash ^= hash_start[i];

	return hash;
}

/*********************** tlb specific functions ***************************/

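/* Reset a TLB hash table entry. If save_load is set, the bytes transmitted
 * since the last rebalance are folded into load_history before the counters
 * are cleared.
 */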
static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
{
	if (save_load) {
		entry->load_history = 1 + entry->tx_bytes /
				      BOND_TLB_REBALANCE_INTERVAL;
		entry->tx_bytes = 0;
	}

	entry->tx_slave = NULL;
	entry->next = TLB_NULL_INDEX;
	entry->prev = TLB_NULL_INDEX;
}

static inline void tlb_init_slave(struct slave *slave)
{
	SLAVE_TLB_INFO(slave).load = 0;
	SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
}

static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
			      int save_load)
{
	struct tlb_client_info *tx_hash_table;
	u32 index;

	/* clear slave from tx_hashtbl */
	tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;

	/* skip this if we've already freed the tx hash table */
	if (tx_hash_table) {
		index = SLAVE_TLB_INFO(slave).head;
		while (index != TLB_NULL_INDEX) {
			u32 next_index = tx_hash_table[index].next;
			tlb_init_table_entry(&tx_hash_table[index], save_load);
			index = next_index;
		}
	}

	tlb_init_slave(slave);
}

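/* Lock-taking wrapper around __tlb_clear_slave(). */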
static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
			    int save_load)
{
	spin_lock_bh(&bond->mode_lock);
	__tlb_clear_slave(bond, slave, save_load);
	spin_unlock_bh(&bond->mode_lock);
}

/* Must be called before starting the monitor timer */
static int tlb_initialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
	struct tlb_client_info *new_hashtbl;
	int i;

	new_hashtbl = kzalloc(size, GFP_KERNEL);
	if (!new_hashtbl)
		return -ENOMEM;

	spin_lock_bh(&bond->mode_lock);

	bond_info->tx_hashtbl = new_hashtbl;

	for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);

	spin_unlock_bh(&bond->mode_lock);

	return 0;
}

/* Must be called only after all slaves have been released */
static void tlb_deinitialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	spin_lock_bh(&bond->mode_lock);

	kfree(bond_info->tx_hashtbl);
	bond_info->tx_hashtbl = NULL;

	spin_unlock_bh(&bond->mode_lock);
}

static long long compute_gap(struct slave *slave)
{
	return (s64) (slave->speed << 20) - /* speed is Mbps; << 20 ~= bits/sec */
	       (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
}

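/* Return the usable slave with the largest gap between its link capacity and
 * the load currently assigned to it, or NULL if no slave can transmit.
 */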
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
	struct slave *slave, *least_loaded;
	struct list_head *iter;
	long long max_gap;

	least_loaded = NULL;
	max_gap = LLONG_MIN;

	/* Find the slave with the largest gap */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (bond_slave_can_tx(slave)) {
			long long gap = compute_gap(slave);

			if (max_gap < gap) {
				least_loaded = slave;
				max_gap = gap;
			}
		}
	}

	return least_loaded;
}

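/* Return the slave assigned to hash_index. If the entry is unassigned, pick
 * the least loaded slave and link the entry into that slave's list. skb_len
 * is accounted to the entry. Caller must hold mode_lock.
 */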
static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
					  u32 skb_len)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct tlb_client_info *hash_table;
	struct slave *assigned_slave;

	hash_table = bond_info->tx_hashtbl;
	assigned_slave = hash_table[hash_index].tx_slave;
	if (!assigned_slave) {
		assigned_slave = tlb_get_least_loaded_slave(bond);

		if (assigned_slave) {
			struct tlb_slave_info *slave_info =
				&(SLAVE_TLB_INFO(assigned_slave));
			u32 next_index = slave_info->head;

			hash_table[hash_index].tx_slave = assigned_slave;
			hash_table[hash_index].next = next_index;
			hash_table[hash_index].prev = TLB_NULL_INDEX;

			if (next_index != TLB_NULL_INDEX)
				hash_table[next_index].prev = hash_index;

			slave_info->head = hash_index;
			slave_info->load +=
				hash_table[hash_index].load_history;
		}
	}

	if (assigned_slave)
		hash_table[hash_index].tx_bytes += skb_len;

	return assigned_slave;
}

static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
					u32 skb_len)
{
	struct slave *tx_slave;

	/* We don't need to disable softirq here, because
	 * tlb_choose_channel() is only called by bond_alb_xmit()
	 * which already has softirq disabled.
	 */
	spin_lock(&bond->mode_lock);
	tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
	spin_unlock(&bond->mode_lock);

	return tx_slave;
}

/*********************** rlb specific functions ***************************/

/* when an ARP REPLY is received from a client update its info
 * in the rx_hashtbl
 */
static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	spin_lock_bh(&bond->mode_lock);

	hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
	client_info = &(bond_info->rx_hashtbl[hash_index]);

	if ((client_info->assigned) &&
	    (client_info->ip_src == arp->ip_dst) &&
	    (client_info->ip_dst == arp->ip_src) &&
	    (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
		/* update the client's MAC address */
		ether_addr_copy(client_info->mac_dst, arp->mac_src);
		client_info->ntt = 1;
		bond_info->rx_ntt = 1;
	}

	spin_unlock_bh(&bond->mode_lock);
}

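/* Receive hook for ARP traffic in ALB mode: purge stale rx_hashtbl entries
 * for the sender IP and, on ARP replies, learn the client's current MAC.
 */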
static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
			struct slave *slave)
{
	struct arp_pkt *arp, _arp;

	if (skb->protocol != cpu_to_be16(ETH_P_ARP))
		goto out;

	arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
	if (!arp)
		goto out;

	/* We received an ARP from arp->ip_src.
	 * We might have used this IP address previously (on the bonding host
	 * itself or on a system that is bridged together with the bond).
	 * However, if arp->mac_src is different than what is stored in
	 * rx_hashtbl, some other host is now using the IP and we must prevent
	 * sending out client updates with this IP address and the old MAC
	 * address.
	 * Clean up all hash table entries that have this address as ip_src but
	 * have a different mac_src.
	 */
	rlb_purge_src_ip(bond, arp);

	if (arp->op_code == htons(ARPOP_REPLY)) {
		/* update rx hash table for this ARP */
		rlb_update_entry_from_arp(bond, arp);
		slave_dbg(bond->dev, slave->dev, "Server received an ARP Reply from client\n");
	}
out:
	return RX_HANDLER_ANOTHER;
}

/* Caller must hold rcu_read_lock() */
static struct slave *__rlb_next_rx_slave(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *before = NULL, *rx_slave = NULL, *slave;
	struct list_head *iter;
	bool found = false;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_can_tx(slave))
			continue;
		if (!found) {
			if (!before || before->speed < slave->speed)
				before = slave;
		} else {
			if (!rx_slave || rx_slave->speed < slave->speed)
				rx_slave = slave;
		}
		if (slave == bond_info->rx_slave)
			found = true;
	}
	/* we didn't find anything after the current or we have something
	 * better before and up to the current slave
	 */
	if (!rx_slave || (before && rx_slave->speed < before->speed))
		rx_slave = before;

	if (rx_slave)
		bond_info->rx_slave = rx_slave;

	return rx_slave;
}

/* Caller must hold RTNL, rcu_read_lock is obtained only to silence checkers */
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
	struct slave *rx_slave;

	ASSERT_RTNL();

	rcu_read_lock();
	rx_slave = __rlb_next_rx_slave(bond);
	rcu_read_unlock();

	return rx_slave;
}

/* teach the switch the mac of a disabled slave
 * on the primary for fault tolerance
 *
 * Caller must hold RTNL
 */
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
	struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

	if (!curr_active)
		return;

	if (!bond->alb_info.primary_is_promisc) {
		if (!dev_set_promiscuity(curr_active->dev, 1))
			bond->alb_info.primary_is_promisc = 1;
		else
			bond->alb_info.primary_is_promisc = 0;
	}

	bond->alb_info.rlb_promisc_timeout_counter = 0;

	alb_send_learning_packets(curr_active, addr, true);
}

/* slave being removed should not be active at this point
 *
 * Caller must hold rtnl.
 */
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *rx_hash_table;
	u32 index, next_index;

	/* clear slave from rx_hashtbl */
	spin_lock_bh(&bond->mode_lock);

	rx_hash_table = bond_info->rx_hashtbl;
	index = bond_info->rx_hashtbl_used_head;
	for (; index != RLB_NULL_INDEX; index = next_index) {
		next_index = rx_hash_table[index].used_next;
		if (rx_hash_table[index].slave == slave) {
			struct slave *assigned_slave = rlb_next_rx_slave(bond);

			if (assigned_slave) {
				rx_hash_table[index].slave = assigned_slave;
				if (is_valid_ether_addr(rx_hash_table[index].mac_dst)) {
					bond_info->rx_hashtbl[index].ntt = 1;
					bond_info->rx_ntt = 1;
					/* A slave has been removed from the
					 * table because it is either disabled
					 * or being released. Retry the update
					 * so that clients are not left with a
					 * stale MAC and disconnected under
					 * stress.
					 */
					bond_info->rlb_update_retry_counter =
						RLB_UPDATE_RETRY;
				}
			} else {  /* there is no active slave */
				rx_hash_table[index].slave = NULL;
			}
		}
	}

	spin_unlock_bh(&bond->mode_lock);

	if (slave != rtnl_dereference(bond->curr_active_slave))
		rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
}

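/* Send a burst of ARP replies to one client so that it learns the MAC
 * address of the slave currently assigned to serve it.
 */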
static void rlb_update_client(struct rlb_client_info *client_info)
{
	int i;

	if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
		return;

	for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
		struct sk_buff *skb;

		skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
				 client_info->ip_dst,
				 client_info->slave->dev,
				 client_info->ip_src,
				 client_info->mac_dst,
				 client_info->slave->dev->dev_addr,
				 client_info->mac_dst);
		if (!skb) {
			slave_err(client_info->slave->bond->dev,
				  client_info->slave->dev,
				  "failed to create an ARP packet\n");
			continue;
		}

		skb->dev = client_info->slave->dev;

		if (client_info->vlan_id) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       client_info->vlan_id);
		}

		arp_xmit(skb);
	}
}

/* sends ARP REPLIES that update the clients that need updating */
static void rlb_update_rx_clients(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	spin_lock_bh(&bond->mode_lock);

	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);
		if (client_info->ntt) {
			rlb_update_client(client_info);
			if (bond_info->rlb_update_retry_counter == 0)
				client_info->ntt = 0;
		}
	}

	/* do not update the entries again until this counter is zero so as
	 * not to confuse the clients.
	 */
	bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;

	spin_unlock_bh(&bond->mode_lock);
}

/* The slave was assigned a new mac address - update the clients */
static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	int ntt = 0;
	u32 hash_index;

	spin_lock_bh(&bond->mode_lock);

	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);

		if ((client_info->slave == slave) &&
		    is_valid_ether_addr(client_info->mac_dst)) {
			client_info->ntt = 1;
			ntt = 1;
		}
	}

	/* update the team's flag only after the whole iteration */
	if (ntt) {
		bond_info->rx_ntt = 1;
		/* speed up the change */
		bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
	}

	spin_unlock_bh(&bond->mode_lock);
}

/* mark all clients using src_ip to be updated */
static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	spin_lock(&bond->mode_lock);

	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);

		if (!client_info->slave) {
			netdev_err(bond->dev, "found a client with no channel in the client's hash table\n");
			continue;
		}
		/* update all clients using this src_ip, that are not assigned
		 * to the team's address (curr_active_slave) and have a known
		 * unicast mac address.
		 */
		if ((client_info->ip_src == src_ip) &&
		    !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
					     bond->dev->dev_addr) &&
		    is_valid_ether_addr(client_info->mac_dst)) {
			client_info->ntt = 1;
			bond_info->rx_ntt = 1;
		}
	}

	spin_unlock(&bond->mode_lock);
}

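/* Choose the slave that will serve this ARP exchange on behalf of the bond
 * and record (or update) the client's entry in rx_hashtbl.
 */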
static struct slave *rlb_choose_channel(struct sk_buff *skb,
					struct bonding *bond,
					const struct arp_pkt *arp)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *assigned_slave, *curr_active_slave;
	struct rlb_client_info *client_info;
	u32 hash_index = 0;

	spin_lock(&bond->mode_lock);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);

	hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
	client_info = &(bond_info->rx_hashtbl[hash_index]);

	if (client_info->assigned) {
		if ((client_info->ip_src == arp->ip_src) &&
		    (client_info->ip_dst == arp->ip_dst)) {
			/* the entry is already assigned to this client */
			if (!is_broadcast_ether_addr(arp->mac_dst)) {
				/* update mac address from arp */
				ether_addr_copy(client_info->mac_dst, arp->mac_dst);
			}
			ether_addr_copy(client_info->mac_src, arp->mac_src);

			assigned_slave = client_info->slave;
			if (assigned_slave) {
				spin_unlock(&bond->mode_lock);
				return assigned_slave;
			}
		} else {
			/* the entry is already assigned to some other client,
			 * move the old client to primary (curr_active_slave) so
			 * that the new client can be assigned to this entry.
			 */
			if (curr_active_slave &&
			    client_info->slave != curr_active_slave) {
				client_info->slave = curr_active_slave;
				rlb_update_client(client_info);
			}
		}
	}
	/* assign a new slave */
	assigned_slave = __rlb_next_rx_slave(bond);

	if (assigned_slave) {
		if (!(client_info->assigned &&
		      client_info->ip_src == arp->ip_src)) {
			/* ip_src is going to be updated,
			 * fix the src hash list
			 */
			u32 hash_src = _simple_hash((u8 *)&arp->ip_src,
						    sizeof(arp->ip_src));
			rlb_src_unlink(bond, hash_index);
			rlb_src_link(bond, hash_src, hash_index);
		}

		client_info->ip_src = arp->ip_src;
		client_info->ip_dst = arp->ip_dst;
		/* arp->mac_dst is broadcast for ARP requests. It will be
		 * updated with the client's actual unicast MAC address upon
		 * receiving an ARP reply.
		 */
		ether_addr_copy(client_info->mac_dst, arp->mac_dst);
		ether_addr_copy(client_info->mac_src, arp->mac_src);
		client_info->slave = assigned_slave;

		if (is_valid_ether_addr(client_info->mac_dst)) {
			client_info->ntt = 1;
			bond->alb_info.rx_ntt = 1;
		} else {
			client_info->ntt = 0;
		}

		if (vlan_get_tag(skb, &client_info->vlan_id))
			client_info->vlan_id = 0;

		if (!client_info->assigned) {
			u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
			bond_info->rx_hashtbl_used_head = hash_index;
			client_info->used_next = prev_tbl_head;
			if (prev_tbl_head != RLB_NULL_INDEX) {
				bond_info->rx_hashtbl[prev_tbl_head].used_prev =
					hash_index;
			}
			client_info->assigned = 1;
		}
	}

	spin_unlock(&bond->mode_lock);

	return assigned_slave;
}

/* chooses (and returns) transmit channel for arp reply
 * does not choose channel for other arp types since they are
 * sent on the curr_active_slave
 */
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
	struct slave *tx_slave = NULL;
	struct arp_pkt *arp;

	if (!pskb_network_may_pull(skb, sizeof(*arp)))
		return NULL;
	arp = (struct arp_pkt *)skb_network_header(skb);

	/* Don't modify or load balance ARPs that do not originate locally
	 * (e.g., arrive via a bridge).
	 */
	if (!bond_slave_has_mac_rx(bond, arp->mac_src))
		return NULL;

	if (arp->op_code == htons(ARPOP_REPLY)) {
		/* the arp must be sent on the selected rx channel */
		tx_slave = rlb_choose_channel(skb, bond, arp);
		if (tx_slave)
			bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
					  tx_slave->dev->addr_len);
		netdev_dbg(bond->dev, "(slave %s): Server sent ARP Reply packet\n",
			   tx_slave ? tx_slave->dev->name : "NULL");
	} else if (arp->op_code == htons(ARPOP_REQUEST)) {
		/* Create an entry in the rx_hashtbl for this client as a
		 * place holder.
		 * When the arp reply is received the entry will be updated
		 * with the correct unicast address of the client.
		 */
		tx_slave = rlb_choose_channel(skb, bond, arp);

		/* The ARP reply packets must be delayed so that
		 * they can cancel out the influence of the ARP request.
		 */
		bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;

		/* ARP requests are broadcast and are sent on the primary
		 * slave; they collapse all clients on the subnet onto the
		 * primary. We must register these clients to be updated
		 * with their assigned MAC.
		 */
		rlb_req_update_subnet_clients(bond, arp->ip_src);
		netdev_dbg(bond->dev, "(slave %s): Server sent ARP Request packet\n",
			   tx_slave ? tx_slave->dev->name : "NULL");
	}

	return tx_slave;
}

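/* Re-assign every active RLB client to the next rx slave and flag the moved
 * clients so that ARP updates are sent to them.
 */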
static void rlb_rebalance(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *assigned_slave;
	struct rlb_client_info *client_info;
	int ntt;
	u32 hash_index;

	spin_lock_bh(&bond->mode_lock);

	ntt = 0;
	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);
		assigned_slave = __rlb_next_rx_slave(bond);
		if (assigned_slave && (client_info->slave != assigned_slave)) {
			client_info->slave = assigned_slave;
			if (!is_zero_ether_addr(client_info->mac_dst)) {
				client_info->ntt = 1;
				ntt = 1;
			}
		}
	}

	/* update the team's flag only after the whole iteration */
	if (ntt)
		bond_info->rx_ntt = 1;
	spin_unlock_bh(&bond->mode_lock);
}

/* Caller must hold mode_lock */
static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
{
	entry->used_next = RLB_NULL_INDEX;
	entry->used_prev = RLB_NULL_INDEX;
	entry->assigned = 0;
	entry->slave = NULL;
	entry->vlan_id = 0;
}

static void rlb_init_table_entry_src(struct rlb_client_info *entry)
{
	entry->src_first = RLB_NULL_INDEX;
	entry->src_prev = RLB_NULL_INDEX;
	entry->src_next = RLB_NULL_INDEX;
}

static void rlb_init_table_entry(struct rlb_client_info *entry)
{
	memset(entry, 0, sizeof(struct rlb_client_info));
	rlb_init_table_entry_dst(entry);
	rlb_init_table_entry_src(entry);
}

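/* Unlink an entry from the used (destination) list of rx_hashtbl. */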
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) u32 next_index = bond_info->rx_hashtbl[index].used_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) u32 prev_index = bond_info->rx_hashtbl[index].used_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (index == bond_info->rx_hashtbl_used_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) bond_info->rx_hashtbl_used_head = next_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (prev_index != RLB_NULL_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) bond_info->rx_hashtbl[prev_index].used_next = next_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (next_index != RLB_NULL_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) bond_info->rx_hashtbl[next_index].used_prev = prev_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* unlink a rlb hash table entry from the src list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static void rlb_src_unlink(struct bonding *bond, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) u32 next_index = bond_info->rx_hashtbl[index].src_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) u32 prev_index = bond_info->rx_hashtbl[index].src_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (next_index != RLB_NULL_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) bond_info->rx_hashtbl[next_index].src_prev = prev_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (prev_index == RLB_NULL_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /* is prev_index pointing to the head of this list? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (bond_info->rx_hashtbl[prev_index].src_first == index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) bond_info->rx_hashtbl[prev_index].src_first = next_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) bond_info->rx_hashtbl[prev_index].src_next = next_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) static void rlb_delete_table_entry(struct bonding *bond, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) rlb_delete_table_entry_dst(bond, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) rlb_init_table_entry_dst(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) rlb_src_unlink(bond, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /* add the rx_hashtbl[ip_dst_hash] entry to the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * of entries with identical ip_src_hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) u32 next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) next = bond_info->rx_hashtbl[ip_src_hash].src_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) bond_info->rx_hashtbl[ip_dst_hash].src_next = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (next != RLB_NULL_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) bond_info->rx_hashtbl[next].src_prev = ip_dst_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
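
/* Worked example (arbitrary hash values): after linking dst-hash 7 and then
 * dst-hash 42 under src-hash 3, the table reads
 *
 *	rx_hashtbl[3].src_first = 42
 *	rx_hashtbl[42].src_prev = 3,  rx_hashtbl[42].src_next = 7
 *	rx_hashtbl[7].src_prev  = 42, rx_hashtbl[7].src_next  = RLB_NULL_INDEX
 *
 * New entries are pushed at the head, and src_prev of the head refers back
 * to the ip_src_hash slot itself, which is why rlb_src_unlink() must check
 * src_first as well as src_next on the predecessor.
 */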
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * not match arp->mac_src
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) spin_lock_bh(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) index = bond_info->rx_hashtbl[ip_src_hash].src_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) while (index != RLB_NULL_INDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) u32 next_index = entry->src_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (entry->ip_src == arp->ip_src &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) !ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) rlb_delete_table_entry(bond, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) index = next_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) spin_unlock_bh(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int rlb_initialize(struct bonding *bond)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct rlb_client_info *new_hashtbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) new_hashtbl = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (!new_hashtbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) spin_lock_bh(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) bond_info->rx_hashtbl = new_hashtbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) rlb_init_table_entry(bond_info->rx_hashtbl + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) spin_unlock_bh(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* register to receive ARPs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) bond->recv_probe = rlb_arp_recv;
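	/* recv_probe is invoked from the bonding receive path for frames
	 * received on the slaves, which is what lets RLB snoop ARP traffic
	 * and keep the rx hash table in sync with client MAC/IP bindings.
	 */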
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static void rlb_deinitialize(struct bonding *bond)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) spin_lock_bh(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) kfree(bond_info->rx_hashtbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) bond_info->rx_hashtbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) spin_unlock_bh(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) u32 curr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) spin_lock_bh(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) curr_index = bond_info->rx_hashtbl_used_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) while (curr_index != RLB_NULL_INDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (curr->vlan_id == vlan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) rlb_delete_table_entry(bond, curr_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) curr_index = next_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) spin_unlock_bh(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /*********************** tlb/rlb shared functions *********************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
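/* A learning packet is a minimal ETH_P_LOOPBACK frame whose source and
 * destination are both the address the switch should (re)learn, padded to
 * the 60-byte Ethernet minimum by struct learning_pkt.  Transmitting it on
 * @slave (VLAN tagged when @vid is set) makes the switch associate that
 * address with the slave's port.
 */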
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) __be16 vlan_proto, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct learning_pkt pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) int size = sizeof(struct learning_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) memset(&pkt, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ether_addr_copy(pkt.mac_dst, mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ether_addr_copy(pkt.mac_src, mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pkt.type = cpu_to_be16(ETH_P_LOOPBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) skb = dev_alloc_skb(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) skb_put_data(skb, &pkt, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) skb->network_header = skb->mac_header + ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) skb->protocol = pkt.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) skb->priority = TC_PRIO_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) skb->dev = slave->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) slave_dbg(slave->bond->dev, slave->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) "Send learning packet: mac %pM vlan %d\n", mac_addr, vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) __vlan_hwaccel_put_tag(skb, vlan_proto, vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct alb_walk_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct bonding *bond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct slave *slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) u8 *mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) bool strict_match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static int alb_upper_dev_walk(struct net_device *upper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct netdev_nested_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct alb_walk_data *data = (struct alb_walk_data *)priv->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) bool strict_match = data->strict_match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct bonding *bond = data->bond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct slave *slave = data->slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) u8 *mac_addr = data->mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct bond_vlan_tag *tags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (is_vlan_dev(upper) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) bond->dev->lower_level == upper->lower_level - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (upper->addr_assign_type == NET_ADDR_STOLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) alb_send_lp_vid(slave, mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) vlan_dev_vlan_proto(upper),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) vlan_dev_vlan_id(upper));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) alb_send_lp_vid(slave, upper->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) vlan_dev_vlan_proto(upper),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) vlan_dev_vlan_id(upper));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* If this is a macvlan device, then only send updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * when strict_match is turned off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (netif_is_macvlan(upper) && !strict_match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) tags = bond_verify_device_path(bond->dev, upper, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (IS_ERR_OR_NULL(tags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) alb_send_lp_vid(slave, upper->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) tags[0].vlan_proto, tags[0].vlan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) kfree(tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) bool strict_match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct bonding *bond = bond_get_bond_by_slave(slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct netdev_nested_priv priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct alb_walk_data data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) .strict_match = strict_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) .mac_addr = mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) .slave = slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) .bond = bond,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) priv.data = (void *)&data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* send untagged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) alb_send_lp_vid(slave, mac_addr, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
	/* loop through all upper devices and see if we need to send a
	 * learning packet for each of them
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) netdev_walk_all_upper_dev_rcu(bond->dev, alb_upper_dev_walk, &priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
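/* In TLB mode only the software dev_addr copy is updated: receive traffic
 * for the bond arrives on the current active slave alone, so the other NICs
 * never have to filter on a foreign address.  RLB, by contrast, points
 * individual clients at different slaves' MACs, so the address must really
 * be programmed into the device with dev_set_mac_address().
 */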
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct net_device *dev = slave->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct sockaddr_storage ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) memcpy(dev->dev_addr, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
	/* For RLB each slave must have a unique hw MAC address so that
	 * each slave will receive packets destined to a different MAC.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) memcpy(ss.__data, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) ss.ss_family = dev->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* Swap MAC addresses between two slaves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * Called with RTNL held, and no other locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) u8 tmp_mac_addr[MAX_ADDR_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) bond_hw_addr_copy(tmp_mac_addr, slave1->dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) slave1->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) slave2->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) alb_set_slave_mac_addr(slave2, tmp_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) slave1->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* Send learning packets after MAC address swap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * Called with RTNL and no other locks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct slave *slave2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct slave *disabled_slave = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* fasten the change in the switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (bond_slave_can_tx(slave1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (bond->alb_info.rlb_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* inform the clients that the mac address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * has changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) rlb_req_update_slave_clients(bond, slave1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) disabled_slave = slave1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (bond_slave_can_tx(slave2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (bond->alb_info.rlb_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* inform the clients that the mac address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * has changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) rlb_req_update_slave_clients(bond, slave2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) disabled_slave = slave2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (bond->alb_info.rlb_enabled && slaves_state_differ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* A disabled slave was assigned an active mac addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rlb_teach_disabled_mac_on_primary(bond,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) disabled_slave->dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /**
 * alb_change_hw_addr_on_detach - handle MAC address bookkeeping on slave detach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * @bond: bonding we're working on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * @slave: the slave that was just detached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * We assume that @slave was already detached from the slave list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * If @slave's permanent hw address is different both from its current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * address and from @bond's address, then somewhere in the bond there's
 * a slave that has @slave's permanent address as its current address.
 * We'll make sure that slave no longer uses @slave's permanent address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * Caller must hold RTNL and no other locks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) int perm_curr_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) int perm_bond_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct slave *found_slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) slave->dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) bond->dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (perm_curr_diff && perm_bond_diff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (found_slave) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) alb_swap_mac_addr(slave, found_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) alb_fasten_mac_swap(bond, slave, found_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /**
 * alb_handle_addr_collision_on_attach - handle MAC address collision on slave attach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * @bond: bonding we're working on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * @slave: the slave that was just attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) *
 * Checks the uniqueness of the slave's MAC address and handles the case
 * where the new slave uses the bond's MAC address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * If the permanent hw address of @slave is @bond's hw address, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * find a different hw address to give @slave, that isn't in use by any other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * slave in the bond. This address must be, of course, one of the permanent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * addresses of the other slaves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * We go over the slave list, and for each slave there we compare its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * permanent hw address with the current address of all the other slaves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * If no match was found, then we've found a slave with a permanent address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * that isn't used by any other slave in the bond, so we can assign it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * @slave.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * assumption: this function is called before @slave is attached to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * bond slave list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct slave *has_bond_addr = rcu_access_pointer(bond->curr_active_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct slave *tmp_slave1, *free_mac_slave = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct list_head *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!bond_has_slaves(bond)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /* this is the first slave */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* if slave's mac address differs from bond's mac address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * check uniqueness of slave's mac address against the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * slaves in the bond.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (!bond_slave_has_mac(bond, slave->dev->dev_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* Try setting slave mac to bond address and fall-through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * to code handling that situation below...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) bond->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /* The slave's address is equal to the address of the bond.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * Search for a spare address in the bond for this slave.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) bond_for_each_slave(bond, tmp_slave1, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /* no slave has tmp_slave1's perm addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * as its curr addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) free_mac_slave = tmp_slave1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!has_bond_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) bond->dev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) has_bond_addr = tmp_slave1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (free_mac_slave) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) free_mac_slave->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) slave_warn(bond->dev, slave->dev, "the slave hw address is in use by the bond; giving it the hw address of %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) free_mac_slave->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) } else if (has_bond_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) slave_err(bond->dev, slave->dev, "the slave hw address is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /**
 * alb_set_mac_address - propagate a new bond MAC address to the slaves (TLB only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * @bond: bonding we're working on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * @addr: MAC address to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * In TLB mode all slaves are configured to the bond's hw address, but set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * their dev_addr field to different addresses (based on their permanent hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * addresses).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * For each slave, this function sets the interface to the new address and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * changes its dev_addr field to its previous value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * Unwinding assumes bond's mac address has not yet changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static int alb_set_mac_address(struct bonding *bond, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct slave *slave, *rollback_slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct list_head *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct sockaddr_storage ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) char tmp_addr[MAX_ADDR_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (bond->alb_info.rlb_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) bond_for_each_slave(bond, slave, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* save net_device's current hw address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) slave->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) res = dev_set_mac_address(slave->dev, addr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /* restore net_device's hw address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) slave->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) unwind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) memcpy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ss.ss_family = bond->dev->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* unwind from head to the slave that failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) bond_for_each_slave(bond, rollback_slave, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (rollback_slave == slave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) rollback_slave->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) dev_set_mac_address(rollback_slave->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) (struct sockaddr *)&ss, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) rollback_slave->dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
/************************ exported alb functions ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
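/* Sets up TLB state and, when @rlb_enabled, RLB state as well: balance-alb
 * uses both while balance-tlb uses TLB only.
 */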
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) res = tlb_initialize(bond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (rlb_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) bond->alb_info.rlb_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) res = rlb_initialize(bond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) tlb_deinitialize(bond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) bond->alb_info.rlb_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) void bond_alb_deinitialize(struct bonding *bond)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) tlb_deinitialize(bond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (bond_info->rlb_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) rlb_deinitialize(bond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
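/* Common transmit tail for TLB and ALB.  Frames with no assigned slave fall
 * back to the current active slave and, under dynamic load balancing, their
 * length is charged to unbalanced_load so the next rebalance accounts for
 * them.  When the chosen slave is not the active one, the Ethernet source
 * address is rewritten to that slave's own MAC so the switch keeps mapping
 * each address to a single port.  If no usable slave remains the frame is
 * dropped (and, under dynamic load balancing, that slave's TLB assignments
 * are cleared so its flows get re-hashed later).
 */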
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct slave *tx_slave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct ethhdr *eth_data = eth_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (!tx_slave) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /* unbalanced or unassigned, send through primary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) tx_slave = rcu_dereference(bond->curr_active_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (bond->params.tlb_dynamic_lb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) bond_info->unbalanced_load += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (tx_slave && bond_slave_can_tx(tx_slave)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) ether_addr_copy(eth_data->h_source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) tx_slave->dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return bond_dev_queue_xmit(bond, skb, tx_slave->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (tx_slave && bond->params.tlb_dynamic_lb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) spin_lock(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) __tlb_clear_slave(bond, tx_slave, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) spin_unlock(&bond->mode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /* no suitable interface, frame not sent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return bond_tx_drop(bond->dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
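/* TLB slave selection: unicast IPv4/IPv6/IPX frames are hashed; with
 * dynamic load balancing the low 8 bits of the hash index the TLB hash
 * table, otherwise the hash picks one of the usable slaves directly.
 * Multicast/broadcast and other protocols return NULL so the caller falls
 * back to the current active slave.
 */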
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct slave *tx_slave = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct ethhdr *eth_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) u32 hash_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) eth_data = eth_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /* Do not TX balance any multicast or broadcast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (!is_multicast_ether_addr(eth_data->h_dest)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) switch (skb->protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) case htons(ETH_P_IP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) case htons(ETH_P_IPX):
			/* In case of IPX, it will fall back to L2 hash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) case htons(ETH_P_IPV6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) hash_index = bond_xmit_hash(bond, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (bond->params.tlb_dynamic_lb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) tx_slave = tlb_choose_channel(bond,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) hash_index & 0xFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct bond_up_slave *slaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) slaves = rcu_dereference(bond->usable_slaves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) count = slaves ? READ_ONCE(slaves->count) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (likely(count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) tx_slave = slaves->arr[hash_index %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) return tx_slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) struct bonding *bond = netdev_priv(bond_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) struct slave *tx_slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) tx_slave = bond_xmit_tlb_slave_get(bond, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return bond_do_alb_xmit(skb, bond, tx_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
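/* ALB slave selection: ARP is handed to the RLB logic (rlb_arp_xmit), which
 * decides the per-client slave assignment.  Unicast IPv4/IPv6/IPX frames
 * are hashed on the destination (IP address, or destination MAC for IPX);
 * broadcasts, IGMP, IPv6 all-nodes multicast, DAD probes and unknown
 * protocols are not balanced and go out through the current active slave.
 */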
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static const __be32 ip_bcast = htonl(0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct slave *tx_slave = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) const u8 *hash_start = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) bool do_tx_balance = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) struct ethhdr *eth_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) u32 hash_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) int hash_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) eth_data = eth_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) switch (ntohs(skb->protocol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) case ETH_P_IP: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) const struct iphdr *iph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (is_broadcast_ether_addr(eth_data->h_dest) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) !pskb_network_may_pull(skb, sizeof(*iph))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) hash_start = (char *)&(iph->daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) hash_size = sizeof(iph->daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) case ETH_P_IPV6: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) const struct ipv6hdr *ip6hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* IPv6 doesn't really use broadcast mac address, but leave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * that here just in case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (is_broadcast_ether_addr(eth_data->h_dest)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /* IPv6 uses all-nodes multicast as an equivalent to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * broadcasts in IPv4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /* Additionally, DAD probes should not be tx-balanced as that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * will lead to false positives for duplicate addresses and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * prevent address configuration from working.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ip6hdr = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (ipv6_addr_any(&ip6hdr->saddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) hash_start = (char *)&ip6hdr->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) hash_size = sizeof(ip6hdr->daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) case ETH_P_IPX: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) const struct ipxhdr *ipxhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
		if (!pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) ipxhdr = (struct ipxhdr *)skb_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /* something is wrong with this packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
			/* NCP is the only protocol in this family worth
			 * balancing, since it has an "ARP"-like mechanism.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) eth_data = eth_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) hash_start = (char *)eth_data->h_dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) hash_size = ETH_ALEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) case ETH_P_ARP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (bond_info->rlb_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) tx_slave = rlb_arp_xmit(skb, bond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) do_tx_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (do_tx_balance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (bond->params.tlb_dynamic_lb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) hash_index = _simple_hash(hash_start, hash_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) } else {
			/* do_tx_balance means we are free to select the
			 * tx_slave, so we do exactly what TLB would do for
			 * hash selection.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct bond_up_slave *slaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) slaves = rcu_dereference(bond->usable_slaves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) count = slaves ? READ_ONCE(slaves->count) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (likely(count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return tx_slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) struct bonding *bond = netdev_priv(bond_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) struct slave *tx_slave = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) tx_slave = bond_xmit_alb_slave_get(bond, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return bond_do_alb_xmit(skb, bond, tx_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
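/* Periodic ALB housekeeping, re-armed every alb_delta_in_ticks.  Each run
 * may (a) send learning packets once lp_counter reaches BOND_ALB_LP_TICKS,
 * (b) clear the TLB assignments and fold unbalanced_load into the active
 * slave's load once tx_rebalance_counter reaches BOND_TLB_REBALANCE_TICKS,
 * and (c) for RLB, drop the primary out of promiscuous mode after a
 * timeout, run a pending rebalance and push delayed client (ARP) updates.
 */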
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) void bond_alb_monitor(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct bonding *bond = container_of(work, struct bonding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) alb_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct list_head *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct slave *slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (!bond_has_slaves(bond)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) atomic_set(&bond_info->tx_rebalance_counter, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) bond_info->lp_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) goto re_arm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) atomic_inc(&bond_info->tx_rebalance_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) bond_info->lp_counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /* send learning packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) bool strict_match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) bond_for_each_slave_rcu(bond, slave, iter) {
			/* If updating the current active slave, use all the
			 * MAC addresses currently in use (!strict_match).
			 * Otherwise, only use the MAC of the slave device.
			 * In RLB mode, we always use strict matches.
			 */
			strict_match = (slave != rcu_access_pointer(bond->curr_active_slave) ||
					bond_info->rlb_enabled);
			alb_send_learning_packets(slave, slave->dev->dev_addr,
						  strict_match);
		}
		bond_info->lp_counter = 0;
	}

	/* rebalance tx traffic */
	if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
		bond_for_each_slave_rcu(bond, slave, iter) {
			tlb_clear_slave(bond, slave, 1);
			if (slave == rcu_access_pointer(bond->curr_active_slave)) {
				SLAVE_TLB_INFO(slave).load =
					bond_info->unbalanced_load /
					BOND_TLB_REBALANCE_INTERVAL;
				bond_info->unbalanced_load = 0;
			}
		}
		atomic_set(&bond_info->tx_rebalance_counter, 0);
	}

	if (bond_info->rlb_enabled) {
		if (bond_info->primary_is_promisc &&
		    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
			/* dev_set_promiscuity requires rtnl and
			 * nothing else. Avoid race with bond_close.
			 */
			rcu_read_unlock();
			if (!rtnl_trylock())
				goto re_arm;

			bond_info->rlb_promisc_timeout_counter = 0;

			/* If the primary was set to promiscuous mode
			 * because a slave was disabled then
			 * it can now leave promiscuous mode.
			 */
			dev_set_promiscuity(rtnl_dereference(bond->curr_active_slave)->dev,
					    -1);
			bond_info->primary_is_promisc = 0;

			rtnl_unlock();
			rcu_read_lock();
		}

		if (bond_info->rlb_rebalance) {
			bond_info->rlb_rebalance = 0;
			rlb_rebalance(bond);
		}

		/* check if clients need updating */
		if (bond_info->rx_ntt) {
			if (bond_info->rlb_update_delay_counter) {
				--bond_info->rlb_update_delay_counter;
			} else {
				rlb_update_rx_clients(bond);
				if (bond_info->rlb_update_retry_counter)
					--bond_info->rlb_update_retry_counter;
				else
					bond_info->rx_ntt = 0;
			}
		}
	}
	rcu_read_unlock();
re_arm:
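	/* re-arm the monitor to run again one ALB timer tick from now */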
	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
}

/* assumption: called before the slave is attached to the bond
 * and not locked by the bond lock
 */
int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
{
	int res;

	res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
				     slave->dev->addr_len);
	if (res)
		return res;

	res = alb_handle_addr_collision_on_attach(bond, slave);
	if (res)
		return res;

	tlb_init_slave(slave);

	/* order a rebalance ASAP */
	atomic_set(&bond->alb_info.tx_rebalance_counter,
		   BOND_TLB_REBALANCE_TICKS);

	if (bond->alb_info.rlb_enabled)
		bond->alb_info.rlb_rebalance = 1;

	return 0;
}

/* Remove slave from tlb and rlb hash tables, and fix up MAC addresses
 * if necessary.
 *
 * Caller must hold RTNL and no other locks
 */
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
	if (bond_has_slaves(bond))
		alb_change_hw_addr_on_detach(bond, slave);

	tlb_clear_slave(bond, slave, 0);

	if (bond->alb_info.rlb_enabled) {
		bond->alb_info.rx_slave = NULL;
		rlb_clear_slave(bond, slave);
	}
}

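/**
 * bond_alb_handle_link_change - react to a change in a slave's link state
 * @bond: our bonding struct
 * @slave: slave whose link state changed
 * @link: new link state (BOND_LINK_UP or BOND_LINK_DOWN)
 *
 * On link down the slave is dropped from the tlb (and, if enabled, rlb)
 * hash tables; on link up an immediate rebalance is ordered. For
 * non-dynamic tlb mode the transmit slave array is rebuilt as well.
 */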
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	if (link == BOND_LINK_DOWN) {
		tlb_clear_slave(bond, slave, 0);
		if (bond->alb_info.rlb_enabled)
			rlb_clear_slave(bond, slave);
	} else if (link == BOND_LINK_UP) {
		/* order a rebalance ASAP */
		atomic_set(&bond_info->tx_rebalance_counter,
			   BOND_TLB_REBALANCE_TICKS);
		if (bond->alb_info.rlb_enabled) {
			bond->alb_info.rlb_rebalance = 1;
			/* If the updelay module parameter is smaller than the
			 * forwarding delay of the switch, the rebalance will
			 * not work because the rebalance ARP replies will
			 * not be forwarded to the clients.
			 */
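			/* For example (illustrative figures only): with a
			 * switch forwarding delay of roughly 30 seconds,
			 * updelay would need to be at least 30000 (ms) for
			 * the rebalance ARP replies to reach the clients.
			 */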
		}
	}

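	/* Non-dynamic tlb mode transmits via a precomputed slave array, so
	 * rebuild it whenever a slave's link state changes.
	 */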
	if (bond_is_nondyn_tlb(bond)) {
		if (bond_update_slave_arr(bond, NULL))
			pr_err("Failed to build slave-array for TLB mode.\n");
	}
}

/**
 * bond_alb_handle_active_change - assign new curr_active_slave
 * @bond: our bonding struct
 * @new_slave: new slave to assign
 *
 * Set the bond->curr_active_slave to @new_slave and handle
 * mac address swapping and promiscuity changes as needed.
 *
 * Caller must hold RTNL
 */
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
{
	struct slave *swap_slave;
	struct slave *curr_active;

	curr_active = rtnl_dereference(bond->curr_active_slave);
	if (curr_active == new_slave)
		return;

	if (curr_active && bond->alb_info.primary_is_promisc) {
		dev_set_promiscuity(curr_active->dev, -1);
		bond->alb_info.primary_is_promisc = 0;
		bond->alb_info.rlb_promisc_timeout_counter = 0;
	}

	swap_slave = curr_active;
	rcu_assign_pointer(bond->curr_active_slave, new_slave);

	if (!new_slave || !bond_has_slaves(bond))
		return;

	/* Set the new curr_active_slave to the bond's mac address, i.e. swap
	 * mac addresses of the old and the new curr_active_slave.
	 */
	if (!swap_slave)
		swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr);

	/* Arrange for swap_slave and new_slave to temporarily be
	 * ignored so we can mess with their MAC addresses without
	 * fear of interference from transmit activity.
	 */
	if (swap_slave)
		tlb_clear_slave(bond, swap_slave, 1);
	tlb_clear_slave(bond, new_slave, 1);

	/* in TLB mode, the slave might flip down/up with the old dev_addr,
	 * and thus filter bond->dev_addr's packets, so force bond's mac
	 */
	if (BOND_MODE(bond) == BOND_MODE_TLB) {
		struct sockaddr_storage ss;
		u8 tmp_addr[MAX_ADDR_LEN];

		bond_hw_addr_copy(tmp_addr, new_slave->dev->dev_addr,
				  new_slave->dev->addr_len);

		bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
				  bond->dev->addr_len);
		ss.ss_family = bond->dev->type;
		/* we don't care if it can't change its mac, best effort */
		dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
				    NULL);

		bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr,
				  new_slave->dev->addr_len);
	}

	/* curr_active_slave must be set before calling alb_swap_mac_addr */
	if (swap_slave) {
		/* swap mac address */
		alb_swap_mac_addr(swap_slave, new_slave);
		alb_fasten_mac_swap(bond, swap_slave, new_slave);
	} else {
		/* set the new_slave to the bond mac address */
		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
				       bond->dev->addr_len);
		alb_send_learning_packets(new_slave, bond->dev->dev_addr,
					  false);
	}
}

/* Called with RTNL */
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct sockaddr_storage *ss = addr;
	struct slave *curr_active;
	struct slave *swap_slave;
	int res;

	if (!is_valid_ether_addr(ss->__data))
		return -EADDRNOTAVAIL;

	res = alb_set_mac_address(bond, addr);
	if (res)
		return res;

	bond_hw_addr_copy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);

	/* If there is no curr_active_slave there is nothing else to do.
	 * Otherwise we'll need to pass the new address to it and handle
	 * duplications.
	 */
	curr_active = rtnl_dereference(bond->curr_active_slave);
	if (!curr_active)
		return 0;

	swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);

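	/* If some slave already carries the new bond address, swap MACs with
	 * it; otherwise program curr_active with the new address and
	 * advertise it.
	 */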
	if (swap_slave) {
		alb_swap_mac_addr(swap_slave, curr_active);
		alb_fasten_mac_swap(bond, swap_slave, curr_active);
	} else {
		alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr,
				       bond_dev->addr_len);

		alb_send_learning_packets(curr_active,
					  bond_dev->dev_addr, false);
		if (bond->alb_info.rlb_enabled) {
			/* inform clients mac address has changed */
			rlb_req_update_slave_clients(bond, curr_active);
		}
	}

	return 0;
}

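/* Flush rlb hash table entries learned on the given vlan_id (no-op when
 * rlb is not enabled).
 */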
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
	if (bond->alb_info.rlb_enabled)
		rlb_clear_vlan(bond, vlan_id);
}