// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"

static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };

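/* Return true if the given port is one of the two ring ports (primary or
 * secondary) of an MRP instance.
 */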
static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
				struct net_bridge_port *s_port,
				struct net_bridge_port *port)
{
	if (port == p_port ||
	    port == s_port)
		return true;

	return false;
}

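/* Return true if the given port is the interconnect port of an MRP
 * instance.
 */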
static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
			      struct net_bridge_port *port)
{
	if (port == i_port)
		return true;

	return false;
}

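/* Look up a bridge port by interface index; returns NULL if the bridge has
 * no port with the given ifindex.
 */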
static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *res = NULL;
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list) {
		if (port->dev->ifindex == ifindex) {
			res = port;
			break;
		}
	}

	return res;
}

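/* Find the MRP instance with the given ring_id. The list can be walked under
 * RCU or under rtnl_lock, hence the lockdep_rtnl_is_held() condition.
 */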
static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->ring_id == ring_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

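/* Find the MRP instance with the given interconnect id (in_id). */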
static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->in_id == in_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

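/* Return true if the interface is not already used as a ring or interconnect
 * port by any MRP instance on this bridge.
 */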
static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		struct net_bridge_port *p;

		p = rtnl_dereference(mrp->p_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->s_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->i_port);
		if (p && p->dev->ifindex == ifindex)
			return false;
	}

	return true;
}

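/* Find the MRP instance that uses the given port as its primary, secondary
 * or interconnect port.
 */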
static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
				       struct net_bridge_port *p)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (rcu_access_pointer(mrp->p_port) == p ||
		    rcu_access_pointer(mrp->s_port) == p ||
		    rcu_access_pointer(mrp->i_port) == p) {
			res = mrp;
			break;
		}
	}

	return res;
}

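/* Get the next sequence id to be placed in the common header of the
 * generated frames.
 */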
static int br_mrp_next_seq(struct br_mrp *mrp)
{
	mrp->seq_id++;
	return mrp->seq_id;
}

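/* Allocate an skb for an MRP frame and fill in the Ethernet header and the
 * 16-bit MRP version field. The callers append the type-specific TLVs.
 */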
static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
					const u8 *src, const u8 *dst)
{
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be16 *version;

	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_MRP);
	skb->priority = MRP_FRAME_PRIO;
	skb_reserve(skb, sizeof(*eth_hdr));

	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, dst);
	ether_addr_copy(eth_hdr->h_source, src);
	eth_hdr->h_proto = htons(ETH_P_MRP);

	version = skb_put(skb, sizeof(*version));
	*version = cpu_to_be16(MRP_VERSION);

	return skb;
}

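/* Append a TLV header (type and length) to the frame. */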
static void br_mrp_skb_tlv(struct sk_buff *skb,
			   enum br_mrp_tlv_header_type type,
			   u8 length)
{
	struct br_mrp_tlv_hdr *hdr;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->type = type;
	hdr->length = length;
}

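/* Append the common TLV: a fresh sequence id and the domain UUID, which is
 * set to all-ones (the default domain).
 */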
static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
	struct br_mrp_common_hdr *hdr;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}

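/* Build a complete MRP_Test frame for the given ring port. The resulting
 * frame layout is:
 *   eth hdr | version | TLV(ring test) | ring test hdr |
 *   TLV(common) | common hdr | TLV(end)
 */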
static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
					     struct net_bridge_port *p,
					     enum br_mrp_port_role_type port_role)
{
	struct br_mrp_ring_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->prio = cpu_to_be16(mrp->prio);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->ring_state);
	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

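/* Build a complete MRP_InTest frame for the given port. Same layout as the
 * MRP_Test frame, but with the interconnect test TLV and header instead.
 */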
static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
						struct net_bridge_port *p,
						enum br_mrp_port_role_type port_role)
{
	struct br_mrp_in_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->id = cpu_to_be16(mrp->in_id);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->in_state);
	hdr->transitions = cpu_to_be16(mrp->in_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

/* This function is continuously called in the following cases:
 * - when the node role is MRM; in this case test_monitor is always set to
 *   false because the node needs to notify the userspace that the ring is
 *   open and needs to send MRP_Test frames
 * - when the node role is MRA; there are 2 subcases:
 *   - when MRA behaves as MRM; this case is similar to the MRM role
 *   - when MRA behaves as MRC; in this case test_monitor is set to true,
 *     because the node needs to detect when it stops seeing MRP_Test frames
 *     from the MRM node but it doesn't need to send MRP_Test frames.
 */
static void br_mrp_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->test_end, jiffies))
		return;

	if (mrp->test_count_miss < mrp->test_max_miss) {
		mrp->test_count_miss++;
	} else {
		/* Notify that the ring is open only if the ring state is
		 * closed, otherwise it would continue to notify at every
		 * interval.
		 * Also notify that the ring is open when the node has the
		 * MRA role and behaves as MRC. The reason is that the
		 * userspace needs to know when the MRM stopped sending
		 * MRP_Test frames so that the current node can try to take
		 * the MRM role.
		 */
		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
		    mrp->test_monitor)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_PRIMARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_SECONDARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(mrp->test_interval));
}

/* This function is continuously called when the node has the interconnect
 * role MIM. It generates interconnect test frames and sends them on all 3
 * ports. It also checks whether it has stopped receiving interconnect test
 * frames.
 */
static void br_mrp_in_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->in_test_end, jiffies))
		return;

	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
		mrp->in_test_count_miss++;
	} else {
		/* Notify that the interconnect ring is open only if the
		 * interconnect ring state is closed, otherwise it would
		 * continue to notify at every interval.
		 */
		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_PRIMARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_SECONDARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->i_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_INTER);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(mrp->in_test_interval));
}

/* Deletes the MRP instance.
 * note: called under rtnl_lock
 */
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
	struct net_bridge_port *p;
	u8 state;

	/* Stop sending MRP_Test frames */
	cancel_delayed_work_sync(&mrp->test_work);
	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);

	/* Stop sending MRP_InTest frames if it has an interconnect role */
	cancel_delayed_work_sync(&mrp->in_test_work);
	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

	br_mrp_switchdev_del(br, mrp);

	/* Reset the ports */
	p = rtnl_dereference(mrp->p_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->p_port, NULL);
	}

	p = rtnl_dereference(mrp->s_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->s_port, NULL);
	}

	p = rtnl_dereference(mrp->i_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);
	}

	list_del_rcu(&mrp->list);
	kfree_rcu(mrp, rcu);
}

/* Adds a new MRP instance.
 * note: called under rtnl_lock
 */
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct net_bridge_port *p;
	struct br_mrp *mrp;
	int err;

	/* If the ring exists, it is not possible to create another one with the
	 * same ring_id
	 */
	mrp = br_mrp_find_id(br, instance->ring_id);
	if (mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, instance->p_ifindex) ||
	    !br_mrp_get_port(br, instance->s_ifindex))
		return -EINVAL;

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
		return -EINVAL;

	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
	if (!mrp)
		return -ENOMEM;

	mrp->ring_id = instance->ring_id;
	mrp->prio = instance->prio;

	p = br_mrp_get_port(br, instance->p_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->p_port, p);

	p = br_mrp_get_port(br, instance->s_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->s_port, p);

	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
	list_add_tail_rcu(&mrp->list, &br->mrp_list);

	err = br_mrp_switchdev_add(br, mrp);
	if (err)
		goto delete_mrp;

	return 0;

delete_mrp:
	br_mrp_del_impl(br, mrp);

	return err;
}

/* Deletes the MRP instance to which the port belongs
 * note: called under rtnl_lock
 */
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of an MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}

/* Deletes existing MRP instance based on ring_id
 * note: called under rtnl_lock
 */
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);

	if (!mrp)
		return -EINVAL;

	br_mrp_del_impl(br, mrp);

	return 0;
}

/* Set port state, port state can be forwarding, blocked or disabled
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_state(struct net_bridge_port *p,
			  enum br_mrp_port_state_type state)
{
	u32 port_state;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	spin_lock_bh(&p->br->lock);

	if (state == BR_MRP_PORT_STATE_FORWARDING)
		port_state = BR_STATE_FORWARDING;
	else
		port_state = BR_STATE_BLOCKING;

	p->state = port_state;
	spin_unlock_bh(&p->br->lock);

	br_mrp_port_switchdev_set_state(p, port_state);

	return 0;
}

/* Set port role, port role can be primary or secondary
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_role(struct net_bridge_port *p,
			 enum br_mrp_port_role_type role)
{
	struct br_mrp *mrp;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	mrp = br_mrp_find_port(p->br, p);

	if (!mrp)
		return -EINVAL;

	switch (role) {
	case BR_MRP_PORT_ROLE_PRIMARY:
		rcu_assign_pointer(mrp->p_port, p);
		break;
	case BR_MRP_PORT_ROLE_SECONDARY:
		rcu_assign_pointer(mrp->s_port, p);
		break;
	default:
		return -EINVAL;
	}

	br_mrp_port_switchdev_set_role(p, role);

	return 0;
}

/* Set ring state, ring state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_state(struct net_bridge *br,
			  struct br_mrp_ring_state *state)
{
	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->ring_state != state->ring_state)
		mrp->ring_transitions++;

	mrp->ring_state = state->ring_state;

	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);

	return 0;
}

/* Set ring role, ring role can be only MRM(Media Redundancy Manager) or
 * MRC(Media Redundancy Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_role(struct net_bridge *br,
			 struct br_mrp_ring_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	int err;

	if (!mrp)
		return -EINVAL;

	mrp->ring_role = role->ring_role;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MRM then the HW will
	 * notify the SW when the ring is open, but if the role is not pushed
	 * to the HW the SW will need to detect when the ring is open.
	 */
	mrp->ring_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start generating or monitoring MRP test frames; the frames are generated
 * by the HW and if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_test(struct net_bridge *br,
		      struct br_mrp_start_test *test)
{
	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);

	if (!mrp)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	if (!br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
					     test->max_miss, test->period,
					     test->monitor))
		return 0;

	mrp->test_interval = test->interval;
	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
	mrp->test_max_miss = test->max_miss;
	mrp->test_monitor = test->monitor;
	mrp->test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(test->interval));

	return 0;
}

/* Set in state, in state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_state != state->in_state)
		mrp->in_transitions++;

	mrp->in_state = state->in_state;

	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);

	return 0;
}

/* Set in role, in role can be only MIM(Media Interconnection Manager) or
 * MIC(Media Interconnection Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	struct net_bridge_port *p;
	int err;

	if (!mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, role->i_ifindex))
		return -EINVAL;

	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
		u8 state;

		/* It is not allowed to disable a port that doesn't exist */
		p = rtnl_dereference(mrp->i_port);
		if (!p)
			return -EINVAL;

		/* Stop generating MRP_InTest frames */
		cancel_delayed_work_sync(&mrp->in_test_work);
		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

		/* Remove the port */
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);

		mrp->in_role = role->in_role;
		mrp->in_id = 0;

		return 0;
	}

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
		return -EINVAL;

	/* It is not allowed to set a different interconnect port if the MRP
	 * instance already has one. The existing port first needs to be
	 * disabled, and only after that can the new port be set.
	 */
	if (rcu_access_pointer(mrp->i_port))
		return -EINVAL;

	p = br_mrp_get_port(br, role->i_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->i_port, p);

	mrp->in_role = role->in_role;
	mrp->in_id = role->in_id;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
					   role->ring_id, role->in_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MIM then the HW will
	 * notify the SW when the interconnect ring is open, but if the role is
	 * not pushed to the HW the SW will need to detect when the
	 * interconnect ring is open.
	 */
	mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start generating MRP_InTest frames; the frames are generated by the HW and
 * if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_in_test(struct net_bridge *br,
			 struct br_mrp_start_in_test *in_test)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
					   in_test->max_miss, in_test->period))
		return 0;

	mrp->in_test_interval = in_test->interval;
	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
	mrp->in_test_max_miss = in_test->max_miss;
	mrp->in_test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(in_test->interval));

	return 0;
}

/* Determine if the frame type is a ring frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) static bool br_mrp_ring_frame(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) const struct br_mrp_tlv_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct br_mrp_tlv_hdr _hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
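^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* Each MRP header starts with a version field which is 16 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * Therefore skip the version and read the TLV header directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) */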
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) hdr->type == BR_MRP_TLV_HEADER_OPTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* Determine whether the frame is an MRP interconnect frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static bool br_mrp_in_frame(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) const struct br_mrp_tlv_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct br_mrp_tlv_hdr _hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
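^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* Each MRP header starts with a version field which is 16 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * Therefore skip the version and read the TLV header directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) */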
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* Process only MRP_Test frames. All the other MRP frames are processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * by the userspace application.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * note: already called with rcu_read_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) const struct br_mrp_tlv_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct br_mrp_tlv_hdr _hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* Each MRP header starts with a version field which is 16 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Therefore skip the version and read the TLV header directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
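^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /* A received MRP_Test frame means the ring is closed, so reset the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * miss counter that is used to detect an open ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) */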
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) mrp->test_count_miss = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* Notify userspace that the ring closed, but only if it was not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * already in the closed state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) br_mrp_ring_port_open(port->dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* Determine whether the test hdr has a better priority than this node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct net_bridge *br,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) const struct br_mrp_ring_test_hdr *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) u16 prio = be16_to_cpu(hdr->prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
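^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /* A lower priority value wins; on equal priority the lower MAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * address wins.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) */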
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (prio < mrp->prio ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) (prio == mrp->prio &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /* Process only MRP_Test frames. All the other MRP frames are processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * by the userspace application.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * note: already called with rcu_read_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct net_bridge_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) const struct br_mrp_ring_test_hdr *test_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct br_mrp_ring_test_hdr _test_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) const struct br_mrp_tlv_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct br_mrp_tlv_hdr _hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /* Each MRP header starts with a version field which is 16 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Therefore skip the version and read the TLV header directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) sizeof(_test_hdr), &_test_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (!test_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* Clear the miss counter only for frames that have a better priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * than this node; otherwise the node needs to start behaving as an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * MRM itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (br_mrp_test_better_than_own(mrp, br, test_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) mrp->test_count_miss = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* Process only MRP_InTest frames. All the other MRP frames are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * processed by the userspace application.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * note: already called with rcu_read_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) const struct br_mrp_in_test_hdr *in_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct br_mrp_in_test_hdr _in_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) const struct br_mrp_tlv_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct br_mrp_tlv_hdr _hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Each MRP header starts with a version field which is 16 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * Therefore skip the version and read the TLV header directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* The check for InTest frame type was already done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) sizeof(_in_hdr), &_in_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (!in_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* The node needs to process only its own InTest frames. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (mrp->in_id != ntohs(in_hdr->id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
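^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* The node's own InTest frame was received back, meaning the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * interconnect ring is closed, so reset the miss counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) */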
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) mrp->in_test_count_miss = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* Notify userspace that the interconnect ring closed, but only if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * was not already in the closed state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) br_mrp_in_port_open(port->dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* Get the MRP frame type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * note: already called with rcu_read_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static u8 br_mrp_get_frame_type(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) const struct br_mrp_tlv_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct br_mrp_tlv_hdr _hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /* Each MRP header starts with a version field which is 16 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * Therefore skip the version and read the TLV header directly. Return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * 0xff, which is not a valid TLV type, if the header cannot be read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return hdr->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
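^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* A node behaves as an MRM when it has the MRM role, or when it has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * the MRA role and does not only monitor MRP_Test frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) */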
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
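^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /* A node behaves as an MRC when it has the MRC role, or when it has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * the MRA role and only monitors MRP_Test frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */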
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /* Forward the frame to the other MRP ring ports, depending on the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * type, the ring role and the interconnect role. A nonzero return tells
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * the caller that the frame was handled here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * note: already called with rcu_read_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static int br_mrp_rcv(struct net_bridge_port *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct net_bridge_port *p_port, *s_port, *i_port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct net_bridge *br;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct br_mrp *mrp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* If port is disabled don't accept any frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (p->state == BR_STATE_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) br = p->br;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) mrp = br_mrp_find_port(br, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (unlikely(!mrp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) p_port = rcu_dereference(mrp->p_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (!p_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) p_dst = p_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) s_port = rcu_dereference(mrp->s_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (!s_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) s_dst = s_port;
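^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /* By default the frame is forwarded on both ring ports; the checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * below clear a dst pointer whenever forwarding on that port is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) */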
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* If the frame is a ring frame then there is no need to check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * interconnect role and ports in order to process or forward it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (br_mrp_ring_frame(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /* If the role is MRM then don't forward the frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) br_mrp_mrm_process(mrp, p, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) goto no_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* If the role is MRA, don't forward the frames when the node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * behaves as an MRM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (!mrp->test_monitor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) br_mrp_mrm_process(mrp, p, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) goto no_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) br_mrp_mra_process(mrp, br, p, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) goto forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (br_mrp_in_frame(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) u8 in_type = br_mrp_get_frame_type(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) i_port = rcu_dereference(mrp->i_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) i_dst = i_port;
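^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* i_port may be NULL when no interconnect port was set; the helpers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * below treat a NULL port as not matching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) */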
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* If the ring port is in the blocking state it should not forward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * In_Test frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (br_mrp_is_ring_port(p_port, s_port, p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) p->state == BR_STATE_BLOCKING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) in_type == BR_MRP_TLV_HEADER_IN_TEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) goto no_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* A node that behaves as an MRM needs to stop forwarding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * frames when the ring is closed, otherwise a loop is created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * In this case the frame is not forwarded between the ring ports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (br_mrp_mrm_behaviour(mrp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) br_mrp_is_ring_port(p_port, s_port, p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) (s_port->state != BR_STATE_FORWARDING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) p_port->state != BR_STATE_FORWARDING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) p_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) s_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /* A node that behaves as an MRC and doesn't have an interconnect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * role should forward all frames between the ring ports, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * it doesn't have an interconnect port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (br_mrp_mrc_behaviour(mrp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) goto forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* MIM should not forward its own InTest frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (br_mrp_mim_process(mrp, p, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto no_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (br_mrp_is_ring_port(p_port, s_port, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) i_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (br_mrp_is_in_port(i_port, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) goto no_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* MIM should forward IntLinkChange and IntTopoChange
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * frames between the ring ports, but it should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * forward them if the frame was received on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * interconnect port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (br_mrp_is_ring_port(p_port, s_port, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) i_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (br_mrp_is_in_port(i_port, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) goto no_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /* MIC should forward InTest frames on all ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * regardless of the port they were received on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) goto forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /* MIC should forward IntLinkChange frames to all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * ports, but only if they were received on a ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (br_mrp_is_ring_port(p_port, s_port, p) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) goto forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* MIC should forward the InTopo frames only between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * ring ports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) i_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) goto forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /* In all the other cases don't forward the frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) goto no_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
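^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /* A NULL dst means the frame must not be forwarded on that port */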
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) forward:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (p_dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) br_forward(p_dst, skb, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (s_dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) br_forward(s_dst, skb, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (i_dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) br_forward(i_dst, skb, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) no_forward:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /* Check whether the frame was received on a port that is part of an MRP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * ring and whether the frame has the MRP ethertype. In that case process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * the frame, otherwise do normal forwarding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * note: already called with rcu_read_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* If there is no MRP instance do normal forwarding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (likely(!(p->flags & BR_MRP_AWARE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (unlikely(skb->protocol == htons(ETH_P_MRP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return br_mrp_rcv(p, skb, p->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) bool br_mrp_enabled(struct net_bridge *br)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return !list_empty(&br->mrp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }