Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame router for HSR and PRP.
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;

/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		hsr_sup_tag =
		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	if (hsr_sup_tag->HSR_TLV_length != 12 &&
	    hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	return true;
}
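
/* A quick sketch of the two supervision-frame layouts the function above
 * distinguishes, assuming the hsrv0_ethhdr_sp/hsrv1_ethhdr_sp definitions
 * from hsr_main.h in this tree:
 *
 *   HSRv0 (h_proto == ETH_P_PRP, 0x88FB):
 *     ethhdr (14) | hsr_sup_tag
 *
 *   HSRv1 (h_proto == ETH_P_HSR, 0x892F):
 *     ethhdr (14) | hsr_tag (6, encap_proto must be ETH_P_PRP) | hsr_sup_tag
 *
 * i.e. HSRv1 supervision frames carry an ordinary HSR tag in front of the
 * supervision tag, which is why the tag is read from a different offset.
 */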

static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_hsr) {
			frame->skb_std =
				create_stripped_skb_hsr(frame->skb_hsr, frame);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_prp) {
			/* trim the skb by len - HSR_HLEN to exclude RCT */
			skb_trim(frame->skb_prp,
				 frame->skb_prp->len - HSR_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

static void prp_set_lan_id(struct prp_rct *trailer,
			   struct hsr_port *port)
{
	int lane_id;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	/* Add net_id in the upper 3 bits of lane_id */
	lane_id |= port->hsr->net_id;
	set_prp_lan_id(trailer, lane_id);
}
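
/* Worked example, assuming net_id was initialised to PRP_LAN_ID << 1 (0xA)
 * when the PRP interface was created, as hsr_device.c does in this tree:
 * slave A ends up with lane_id 0xA (0b1010) and slave B with 0xB (0b1011),
 * and set_prp_lan_id() stores that nibble in the RCT's combined
 * LAN-id/LSDU-size field.
 */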

/* Tailroom for PRP rct should have been created before calling this */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);

	return skb;
}
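
/* Arithmetic check for the common non-VLAN case: skb_put_padto() pads the
 * frame to ETH_ZLEN (60 bytes including the 14-byte Ethernet header),
 * skb_put() then appends the 6-byte RCT, so skb->len is 66 and
 * lsdu_size = 66 - 14 = 52, i.e. the (padded) payload plus the RCT itself.
 */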

static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
			    struct hsr_port *port)
{
	int path_id;

	if (port->type == HSR_PT_SLAVE_A)
		path_id = 0;
	else
		path_id = 1;

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
}

static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lsdu_size;

	/* pad to minimum packet size which is 60 + 6 (HSR tag) */
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return NULL;

	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);

	hsr_set_path_id(hsr_ethhdr, port);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);

	return skb;
}

/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto() frees the skb on error, and hsr_fill_tag() returns
	 * NULL in that case.
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
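
/* Resulting layout, sketched for the non-VLAN case: skb_push() opens
 * HSR_HLEN (6) bytes of headroom, memmove() shifts the 14-byte Ethernet
 * header into it, and hsr_fill_tag() fills the 6 bytes that open up behind
 * the header:
 *
 *   dst(6) | src(6) | ETH_P_HSR/ETH_P_PRP(2) | path+LSDU(2) | seq(2) |
 *   encap_proto(2) | payload
 *
 * where encap_proto carries the original EtherType that the tag displaced.
 */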

struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	struct sk_buff *skb;

	if (frame->skb_prp) {
		struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);

		if (trailer) {
			prp_set_lan_id(trailer, port);
		} else {
			WARN_ONCE(!trailer, "errored PRP skb");
			return NULL;
		}
		return skb_clone(frame->skb_prp, GFP_ATOMIC);
	}

	skb = skb_copy_expand(frame->skb_std, 0,
			      skb_tailroom(frame->skb_std) + HSR_HLEN,
			      GFP_ATOMIC);

	/* prp_fill_rct() returns NULL when padding fails (skb_put_padto()
	 * frees the skb in that case), so propagate its return value rather
	 * than the possibly freed skb pointer.
	 */
	return prp_fill_rct(skb, frame, port);
}

static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}

bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
		 port->type ==  HSR_PT_SLAVE_B) ||
		(frame->port_rcv->type == HSR_PT_SLAVE_B &&
		 port->type ==  HSR_PT_SLAVE_A));
}

/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's an HSR frame: through a device where it has passed before
 * - If it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert an HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* Don't send the frame over a port where it has already been
		 * sent. This check is also skipped for frames from a SAN.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if the frame is to be dropped, e.g. for PRP there is
		 * no forwarding between the slave ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER)
			hsr_deliver_master(skb, port->dev, frame->node_src);
		else
			hsr_xmit(skb, port, frame);
	}
}

static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if (skb->pkt_type == PACKET_HOST ||
	    skb->pkt_type == PACKET_MULTICAST ||
	    skb->pkt_type == PACKET_BROADCAST) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}

static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;
	unsigned long irqflags;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER) {
		frame->is_from_san = true;
	} else {
		/* Sequence nr for the master node */
		spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
		spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
	}
}

int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	if (proto == htons(ETH_P_PRP) ||
	    proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return -EINVAL;

		/* HSR-tagged frame: data or supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return 0;
	}

	/* Standard frame or PRP from master port */
	handle_std_frame(skb, frame);

	return 0;
}

int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	/* Supervision frame */
	struct prp_rct *rct = skb_get_PRP_rct(skb);

	if (rct &&
	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
		frame->skb_hsr = NULL;
		frame->skb_std = NULL;
		frame->skb_prp = skb;
		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		return 0;
	}
	handle_std_frame(skb, frame);

	return 0;
}

static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;
	int ret;

	/* Check if skb contains ethhdr */
	if (skb->mac_len < sizeof(struct ethhdr))
		return -EINVAL;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
	if (ret)
		return ret;

	check_local_dest(port->hsr, skb, frame);

	return 0;
}

/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}
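
/* A sketch of the expected call sites, assuming the usual layout of the hsr
 * module in this kernel series: the slave rx handler hsr_handle_frame() in
 * hsr_slave.c forwards ingress frames, and hsr_dev_xmit() in hsr_device.c
 * forwards egress frames via the master port, roughly:
 *
 *	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
 *	if (master) {
 *		skb->dev = master->dev;
 *		hsr_forward_skb(skb, master);
 *	}
 */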