// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Facebook Inc.

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>

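/* Per-entry state: ADD/DEL mark an operation queued towards the device,
 * OP_FAIL records that the last operation on this entry failed (the entry is
 * "dodgy" and may be out of sync with the hardware), and FROZEN protects an
 * in-use entry from being modified while a replay re-announces ports.
 */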
enum udp_tunnel_nic_table_entry_flags {
	UDP_TUNNEL_NIC_ENTRY_ADD = BIT(0),
	UDP_TUNNEL_NIC_ENTRY_DEL = BIT(1),
	UDP_TUNNEL_NIC_ENTRY_OP_FAIL = BIT(2),
	UDP_TUNNEL_NIC_ENTRY_FROZEN = BIT(3),
};

struct udp_tunnel_nic_table_entry {
	__be16 port;
	u8 type;
	u8 flags;
	u16 use_cnt;
#define UDP_TUNNEL_NIC_USE_CNT_MAX U16_MAX
	u8 hw_priv;
};

/**
 * struct udp_tunnel_nic - UDP tunnel port offload state
 * @work:	async work for talking to hardware from process context
 * @dev:	netdev pointer
 * @need_sync:	at least one port state changed
 * @need_replay: space was freed, we need a replay of all ports
 * @work_pending: @work is currently scheduled
 * @n_tables:	number of tables under @entries
 * @missed:	bitmap of tables which overflowed
 * @entries:	table of tables of ports currently offloaded
 */
struct udp_tunnel_nic {
	struct work_struct work;

	struct net_device *dev;

	u8 need_sync:1;
	u8 need_replay:1;
	u8 work_pending:1;

	unsigned int n_tables;
	unsigned long missed;
	struct udp_tunnel_nic_table_entry **entries;
};

/* All work items are guaranteed to complete while the driver state they use
 * is still around, but the same cannot be guaranteed for the module code.
 * We need a workqueue we can flush before the module gets removed.
 */
static struct workqueue_struct *udp_tunnel_nic_workqueue;

static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	case UDP_TUNNEL_TYPE_VXLAN_GPE:
		return "vxlan-gpe";
	default:
		return "unknown";
	}
}

static bool
udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt == 0 && !entry->flags;
}

static bool
udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN);
}

static bool
udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)
{
	if (!udp_tunnel_nic_entry_is_free(entry))
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)
{
	entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static bool
udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD |
			       UDP_TUNNEL_NIC_ENTRY_DEL);
}

static void
udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
			   struct udp_tunnel_nic_table_entry *entry,
			   unsigned int flag)
{
	entry->flags |= flag;
	utn->need_sync = 1;
}

static void
udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry,
			     struct udp_tunnel_info *ti)
{
	memset(ti, 0, sizeof(*ti));
	ti->port = entry->port;
	ti->type = entry->type;
	ti->hw_priv = entry->hw_priv;
}

static bool
udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return false;
	return true;
}

static bool
udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	if (!utn->missed)
		return false;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!test_bit(i, &utn->missed))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return true;
	}

	return false;
}

static void
__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
			  unsigned int idx, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	entry = &utn->entries[table][idx];

	if (entry->use_cnt)
		udp_tunnel_nic_ti_from_entry(entry, ti);
}

static void
__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
			       unsigned int idx, u8 priv)
{
	dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
}

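/* Clear the queued ADD/DEL flag once the device has processed the operation.
 * For "dodgy" entries (a previous op failed) -EEXIST on add and -ENOENT on
 * delete are tolerated, since the device may already be in the target state.
 */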
static void
udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry,
				 int err)
{
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;

	WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
		     entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL);

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
	    (!err || (err == -EEXIST && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD;

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL &&
	    (!err || (err == -ENOENT && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL;

	if (!err)
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	else
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
}

static void
udp_tunnel_nic_device_sync_one(struct net_device *dev,
			       struct udp_tunnel_nic *utn,
			       unsigned int table, unsigned int idx)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_info ti;
	int err;

	entry = &utn->entries[table][idx];
	if (!udp_tunnel_nic_entry_is_queued(entry))
		return;

	udp_tunnel_nic_ti_from_entry(entry, &ti);
	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD)
		err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
	else
		err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
							   &ti);
	udp_tunnel_nic_entry_update_done(entry, err);

	if (err)
		netdev_warn(dev,
			    "UDP tunnel port sync failed port %d type %s: %d\n",
			    be16_to_cpu(entry->port),
			    udp_tunnel_nic_tunnel_type_name(entry->type),
			    err);
}

static void
udp_tunnel_nic_device_sync_by_port(struct net_device *dev,
				   struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_device_sync_one(dev, utn, i, j);
}

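/* Drivers which implement ->sync_table() get one callback per table that has
 * queued entries; the returned error is then applied to every queued entry in
 * that table.
 */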
static void
udp_tunnel_nic_device_sync_by_table(struct net_device *dev,
				    struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;
	int err;

	for (i = 0; i < utn->n_tables; i++) {
		/* Find something that needs sync in this table */
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
				break;
		if (j == info->tables[i].n_entries)
			continue;

		err = info->sync_table(dev, i);
		if (err)
			netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
				    i, err);

		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (udp_tunnel_nic_entry_is_queued(entry))
				udp_tunnel_nic_entry_update_done(entry, err);
		}
	}
}

static void
__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	if (!utn->need_sync)
		return;

	if (dev->udp_tunnel_nic_info->sync_table)
		udp_tunnel_nic_device_sync_by_table(dev, utn);
	else
		udp_tunnel_nic_device_sync_by_port(dev, utn);

	utn->need_sync = 0;
	/* Can't replay directly here, in case we come from the tunnel driver's
	 * notification - trying to replay may deadlock inside tunnel driver.
	 */
	utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}

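/* Entry point for pushing queued changes to the device. Drivers which may
 * sleep in their callbacks are serviced from the workqueue; a needed replay
 * is likewise deferred to the workqueue.
 */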
static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	bool may_sleep;

	if (!utn->need_sync)
		return;

	/* Drivers which sleep in the callback need to update from
	 * the workqueue, if we come from the tunnel driver's notification.
	 */
	may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	if (!may_sleep)
		__udp_tunnel_nic_device_sync(dev, utn);
	if (may_sleep || utn->need_replay) {
		queue_work(udp_tunnel_nic_workqueue, &utn->work);
		utn->work_pending = 1;
	}
}

static bool
udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
				struct udp_tunnel_info *ti)
{
	return table->tunnel_types & ti->type;
}

static bool
udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
			  struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i;

	/* Special case IPv4-only NICs */
	if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY &&
	    ti->sa_family != AF_INET)
		return false;

	for (i = 0; i < utn->n_tables; i++)
		if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti))
			return true;
	return false;
}

static int
udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
			     struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_table_entry *entry;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			entry = &utn->entries[i][j];

			if (!udp_tunnel_nic_entry_is_free(entry) &&
			    entry->port == ti->port &&
			    entry->type != ti->type) {
				__set_bit(i, &utn->missed);
				return true;
			}
		}
	return false;
}

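/* Adjust the use count of an existing entry and queue an ADD or DEL only when
 * the entry transitions between used and unused. An opposite operation which
 * is still queued (not yet sent to the device) is simply cancelled instead,
 * unless the entry is dodgy and needs another sync anyway.
 */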
static void
udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
			 unsigned int table, unsigned int idx, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	unsigned int from, to;

	WARN_ON(entry->use_cnt + (u32)use_cnt_adj > U16_MAX);

	/* If not going from used to unused or vice versa - all done.
	 * For dodgy entries make sure we try to sync again (queue the entry).
	 */
	entry->use_cnt += use_cnt_adj;
	if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj))
		return;

	/* Cancel the op before it was sent to the device, if possible,
	 * otherwise we'd need to take special care to issue commands
	 * in the same order the ports arrived.
	 */
	if (use_cnt_adj < 0) {
		from = UDP_TUNNEL_NIC_ENTRY_ADD;
		to = UDP_TUNNEL_NIC_ENTRY_DEL;
	} else {
		from = UDP_TUNNEL_NIC_ENTRY_DEL;
		to = UDP_TUNNEL_NIC_ENTRY_ADD;
	}

	if (entry->flags & from) {
		entry->flags &= ~from;
		if (!dodgy)
			return;
	}

	udp_tunnel_nic_entry_queue(utn, entry, to);
}

static bool
udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
			     unsigned int table, unsigned int idx,
			     struct udp_tunnel_info *ti, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];

	if (udp_tunnel_nic_entry_is_free(entry) ||
	    entry->port != ti->port ||
	    entry->type != ti->type)
		return false;

	if (udp_tunnel_nic_entry_is_frozen(entry))
		return true;

	udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
	return true;
}

/* Try to find an existing matching entry and adjust its use count, instead of
 * adding a new one. Returns true if an entry was found. In case of a delete
 * the entry's use count may have dropped to zero, in which case the entry is
 * queued for removal from the device.
 */
static bool
udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti, int use_cnt_adj)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
							 use_cnt_adj))
				return true;
	}

	return false;
}

static bool
udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
}

static bool
udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
}

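/* Claim the first free slot in a capable table for a brand new port. If every
 * capable table is full, mark the table in @missed so the port can be retried
 * once space frees up and a replay runs.
 */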
static bool
udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
		       struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (!udp_tunnel_nic_entry_is_free(entry))
				continue;

			entry->port = ti->port;
			entry->type = ti->type;
			entry->use_cnt = 1;
			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
			return true;
		}

		/* A different table may still have room for this port, but
		 * there are no devices currently which have multiple tables
		 * accepting the same tunnel type, and false positives are
		 * okay.
		 */
		__set_bit(i, &utn->missed);
	}

	return false;
}

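/* Notification from the tunnel infrastructure that a port is in use. Checks
 * device capabilities and port collisions first, then either bumps the use
 * count on an existing entry or allocates a new slot, and finally pushes the
 * change to the device.
 */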
static void
__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;
	if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
		return;
	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN &&
	    ti->port == htons(IANA_VXLAN_UDP_PORT)) {
		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
			netdev_warn(dev, "device assumes port 4789 will be used by vxlan tunnels\n");
		return;
	}

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	/* It may happen that a tunnel of one type is removed and a different
	 * tunnel type tries to reuse its port before the device was informed.
	 * Rely on utn->missed to re-add this port later.
	 */
	if (udp_tunnel_nic_has_collision(dev, utn, ti))
		return;

	if (!udp_tunnel_nic_add_existing(dev, utn, ti))
		udp_tunnel_nic_add_new(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}

static void
__udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	udp_tunnel_nic_del_existing(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}

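/* Driver notification that the device forgot its UDP tunnel ports (e.g. after
 * a reset). Forget pending deletes and failures, then re-queue an ADD for
 * every entry that is still in use.
 */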
static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int i, j;

	ASSERT_RTNL();

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	utn->need_sync = false;
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];

			entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
					  UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
			/* We don't release rtnl across ops */
			WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
			if (!entry->use_cnt)
				continue;

			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
		}

	__udp_tunnel_nic_device_sync(dev, utn);
}

static size_t
__udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int j;
	size_t size;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	size = 0;
	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		size += nla_total_size(0) + /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32)); /* _ENTRY_TYPE */
	}

	return size;
}

static int
__udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
			    struct sk_buff *skb)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	struct nlattr *nest;
	unsigned int j;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		nest = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
		if (!nest)
			return -EMSGSIZE;

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 utn->entries[table][j].port) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(utn->entries[table][j].type)))
			goto err_cancel;

		nla_nest_end(skb, nest);
	}

	return 0;

err_cancel:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
	.get_port = __udp_tunnel_nic_get_port,
	.set_port_priv = __udp_tunnel_nic_set_port_priv,
	.add_port = __udp_tunnel_nic_add_port,
	.del_port = __udp_tunnel_nic_del_port,
	.reset_ntf = __udp_tunnel_nic_reset_ntf,
	.dump_size = __udp_tunnel_nic_dump_size,
	.dump_write = __udp_tunnel_nic_dump_write,
};

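/* Drop every use count, push the resulting deletes to the device, then wipe
 * the software state. Used when all offloaded ports should be forgotten,
 * e.g. when an OPEN_ONLY device is brought down.
 */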
static void
udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			int adj_cnt = -utn->entries[i][j].use_cnt;

			if (adj_cnt)
				udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
		}

	__udp_tunnel_nic_device_sync(dev, utn);

	for (i = 0; i < utn->n_tables; i++)
		memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
						      sizeof(**utn->entries)));
	WARN_ON(utn->need_sync);
	utn->need_replay = 0;
}

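/* Replay all ports the stack knows about. Entries already in the table are
 * frozen first so that the re-announcement does not bump their use counts a
 * second time; the tunnel drivers are then asked to re-send their ports via
 * udp_tunnel_get_rx_info().
 */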
static void
udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node;
	unsigned int i, j;

	/* Freeze all the ports we are already tracking so that the replay
	 * does not double up the refcount.
	 */
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
	utn->missed = 0;
	utn->need_replay = 0;

	if (!info->shared) {
		udp_tunnel_get_rx_info(dev);
	} else {
		list_for_each_entry(node, &info->shared->devices, list)
			udp_tunnel_get_rx_info(node->dev);
	}

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
}

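/* Deferred sync for drivers that must not be called in atomic context, and
 * for replays which cannot run from the tunnel driver's own notification.
 */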
static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
{
	struct udp_tunnel_nic *utn =
		container_of(work, struct udp_tunnel_nic, work);

	rtnl_lock();
	utn->work_pending = 0;
	__udp_tunnel_nic_device_sync(utn->dev, utn);

	if (utn->need_replay)
		udp_tunnel_nic_replay(utn->dev, utn);
	rtnl_unlock();
}

static struct udp_tunnel_nic *
udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
		     unsigned int n_tables)
{
	struct udp_tunnel_nic *utn;
	unsigned int i;

	utn = kzalloc(sizeof(*utn), GFP_KERNEL);
	if (!utn)
		return NULL;
	utn->n_tables = n_tables;
	INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);

	utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
	if (!utn->entries)
		goto err_free_utn;

	for (i = 0; i < n_tables; i++) {
		utn->entries[i] = kcalloc(info->tables[i].n_entries,
					  sizeof(*utn->entries[i]), GFP_KERNEL);
		if (!utn->entries[i])
			goto err_free_prev_entries;
	}

	return utn;

err_free_prev_entries:
	while (i--)
		kfree(utn->entries[i]);
	kfree(utn->entries);
err_free_utn:
	kfree(utn);
	return NULL;
}

static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)
{
	unsigned int i;

	for (i = 0; i < utn->n_tables; i++)
		kfree(utn->entries[i]);
	kfree(utn->entries);
	kfree(utn);
}

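/* Validate the driver-provided udp_tunnel_nic_info, allocate (or, for shared
 * tables, reuse) the offload state, attach it to the netdev and take a
 * reference on the device. A minimal driver description (hypothetical, names
 * illustrative) would look roughly like:
 *
 *	static const struct udp_tunnel_nic_info foo_udp_tunnels = {
 *		.set_port	= foo_udp_tunnel_set_port,
 *		.unset_port	= foo_udp_tunnel_unset_port,
 *		.tables		= {
 *			{
 *				.n_entries	= 4,
 *				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN,
 *			},
 *		},
 *	};
 */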
static int udp_tunnel_nic_register(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node = NULL;
	struct udp_tunnel_nic *utn;
	unsigned int n_tables, i;

	BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
		     UDP_TUNNEL_NIC_MAX_TABLES);
	/* Expect use count of at most 2 (IPv4, IPv6) per device */
	BUILD_BUG_ON(UDP_TUNNEL_NIC_USE_CNT_MAX <
		     UDP_TUNNEL_NIC_MAX_SHARING_DEVICES * 2);

	/* Check that the driver info is sane */
	if (WARN_ON(!info->set_port != !info->unset_port) ||
	    WARN_ON(!info->set_port == !info->sync_table) ||
	    WARN_ON(!info->tables[0].n_entries))
		return -EINVAL;

	if (WARN_ON(info->shared &&
		    info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return -EINVAL;

	n_tables = 1;
	for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			continue;

		n_tables++;
		if (WARN_ON(!info->tables[i - 1].n_entries))
			return -EINVAL;
	}

	/* Create UDP tunnel state structures */
	if (info->shared) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->dev = dev;
	}

	if (info->shared && info->shared->udp_tunnel_nic_info) {
		utn = info->shared->udp_tunnel_nic_info;
	} else {
		utn = udp_tunnel_nic_alloc(info, n_tables);
		if (!utn) {
			kfree(node);
			return -ENOMEM;
		}
	}

	if (info->shared) {
		if (!info->shared->udp_tunnel_nic_info) {
			INIT_LIST_HEAD(&info->shared->devices);
			info->shared->udp_tunnel_nic_info = utn;
		}

		list_add_tail(&node->list, &info->shared->devices);
	}

	utn->dev = dev;
	dev_hold(dev);
	dev->udp_tunnel_nic = utn;

	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		udp_tunnel_get_rx_info(dev);

	return 0;
}

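/* Tear down the offload state on NETDEV_UNREGISTER. For a shared table the
 * state may outlive this device: it is handed over to the next device on the
 * sharing list and only this device's list node is freed. Otherwise the
 * table is flushed and the state freed once no work is pending; while work
 * is still pending we keep our netdev reference so the netdev core retries
 * the unregister.
 */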
static void
udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;

	/* For a shared table remove this dev from the list of sharing devices
	 * and if there are other devices just detach.
	 */
	if (info->shared) {
		struct udp_tunnel_nic_shared_node *node, *first;

		list_for_each_entry(node, &info->shared->devices, list)
			if (node->dev == dev)
				break;
		if (list_entry_is_head(node, &info->shared->devices, list))
			return;

		list_del(&node->list);
		kfree(node);

		first = list_first_entry_or_null(&info->shared->devices,
						 typeof(*first), list);
		if (first) {
			udp_tunnel_drop_rx_info(dev);
			utn->dev = first->dev;
			goto release_dev;
		}

		info->shared->udp_tunnel_nic_info = NULL;
	}

	/* Flush before we check work, so we don't waste time adding entries
	 * from the work which we will boot immediately.
	 */
	udp_tunnel_nic_flush(dev, utn);

	/* Wait for the work to be done using the state, netdev core will
	 * retry unregister until we give up our reference on this device.
	 */
	if (utn->work_pending)
		return;

	udp_tunnel_nic_free(utn);
release_dev:
	dev->udp_tunnel_nic = NULL;
	dev_put(dev);
}

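/* Netdev notifier: NETDEV_REGISTER instantiates the offload state and
 * NETDEV_UNREGISTER tears it down. For devices flagged
 * UDP_TUNNEL_NIC_INFO_OPEN_ONLY the ports are additionally replayed on
 * NETDEV_UP and flushed on NETDEV_GOING_DOWN, since such NICs are only
 * programmed while the device is up.
 */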
static int
udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	const struct udp_tunnel_nic_info *info;
	struct udp_tunnel_nic *utn;

	info = dev->udp_tunnel_nic_info;
	if (!info)
		return NOTIFY_DONE;

	if (event == NETDEV_REGISTER) {
		int err;

		err = udp_tunnel_nic_register(dev);
		if (err)
			netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
		return notifier_from_errno(err);
	}
	/* All other events will need the udp_tunnel_nic state */
	utn = dev->udp_tunnel_nic;
	if (!utn)
		return NOTIFY_DONE;

	if (event == NETDEV_UNREGISTER) {
		udp_tunnel_nic_unregister(dev, utn);
		return NOTIFY_OK;
	}

	/* All other events only matter if NIC has to be programmed open */
	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return NOTIFY_DONE;

	if (event == NETDEV_UP) {
		WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
		udp_tunnel_get_rx_info(dev);
		return NOTIFY_OK;
	}
	if (event == NETDEV_GOING_DOWN) {
		udp_tunnel_nic_flush(dev, utn);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = {
	.notifier_call = udp_tunnel_nic_netdevice_event,
};

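/* Module init: allocate the ordered workqueue used to talk to drivers from
 * process context, publish the ops pointer under RTNL so the udp_tunnel
 * core starts routing offload requests here, then hook the netdev notifier.
 * If notifier registration fails, the ops pointer and workqueue are torn
 * down again.
 */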
static int __init udp_tunnel_nic_init_module(void)
{
	int err;

	udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
	if (!udp_tunnel_nic_workqueue)
		return -ENOMEM;

	rtnl_lock();
	udp_tunnel_nic_ops = &__udp_tunnel_nic_ops;
	rtnl_unlock();

	err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block);
	if (err)
		goto err_unset_ops;

	return 0;

err_unset_ops:
	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();
	destroy_workqueue(udp_tunnel_nic_workqueue);
	return err;
}
late_initcall(udp_tunnel_nic_init_module);

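/* Module exit: unwind in the reverse order of init: unregister the
 * notifier, clear the ops pointer under RTNL, then destroy the workqueue
 * once no new work can be queued.
 */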
static void __exit udp_tunnel_nic_cleanup_module(void)
{
	unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block);

	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();

	destroy_workqueue(udp_tunnel_nic_workqueue);
}
module_exit(udp_tunnel_nic_cleanup_module);

MODULE_LICENSE("GPL");