Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static int xlgmac_one_poll(struct napi_struct *, int);
static int xlgmac_all_poll(struct napi_struct *, int);

static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return (ring->dma_desc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
{
	return (ring->cur - ring->dirty);
}
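
/* Note: ring->cur and ring->dirty are free-running unsigned indices, so
 * the subtractions above stay correct even after the counters wrap.
 * Worked example (illustrative numbers only): with dma_desc_count = 1024,
 * cur = 1034 and dirty = 1024, ten descriptors are in flight and
 * xlgmac_tx_avail_desc() returns 1024 - 10 = 1014.
 */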

static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}
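
/* Illustrative TSO arithmetic (example values, not taken from this tree):
 * a 9000-byte TCP payload with an MSS of 1500 is cut into gso_segs = 6
 * segments. If the Ethernet + IP + TCP headers total header_len = 66
 * bytes, the hardware replicates those headers on the five extra
 * segments, so tx_bytes grows by (6 - 1) * 66 = 330 bytes over skb->len.
 */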

static int xlgmac_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	return 1;
}

static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	skb_frag_t *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}
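
/* The descriptor budget computed above is effectively:
 *
 *   desc_count = (context descriptor, if the MSS or VLAN tag changed)
 *              + (one TSO header descriptor, when TSO is in use)
 *              + DIV_ROUND_UP(skb_headlen(skb), XLGMAC_TX_MAX_BUF_SIZE)
 *              + sum over frags of DIV_ROUND_UP(size, XLGMAC_TX_MAX_BUF_SIZE)
 *
 * since each loop iteration consumes at most XLGMAC_TX_MAX_BUF_SIZE bytes.
 */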

static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
		      ~(XLGMAC_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}
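
/* The final step is the usual power-of-two round-up idiom,
 * (x + align - 1) & ~(align - 1). For example, an MTU of 1500 gives
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes,
 * which a 64-byte XLGMAC_RX_BUF_ALIGN (assumed here purely for
 * illustration) rounds up to 1536.
 */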

static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->enable_int(channel, int_id);
	}
}

static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->disable_int(channel, int_id);
	}
}
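
/* The enable/disable helpers above mirror each other. The int_id
 * selection matters because a channel may carry only a Tx ring or only
 * an Rx ring when the Tx and Rx queue counts differ, so only the
 * interrupt sources that channel can actually raise are touched.
 */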

static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);

				pdata->stats.napi_poll_isr++;
				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
					DMA_CH_SR_TPS_LEN))
			pdata->stats.tx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
					DMA_CH_SR_RPS_LEN))
			pdata->stats.rx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
					DMA_CH_SR_TBU_LEN))
			pdata->stats.tx_buffer_unavailable++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN)) {
			pdata->stats.fatal_bus_error++;
			schedule_work(&pdata->restart_work);
		}

		/* Clear all interrupt signals */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}

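/* Per-channel DMA interrupt handler. napi_schedule_prep() returns false
 * if this channel's NAPI context is already scheduled, so the IRQ line is
 * masked (disable_irq_nosync() is safe inside the handler itself) only on
 * the first interrupt of a poll cycle; the poll routine is then expected
 * to re-enable it once the ring has been drained.
 */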
static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}

static void xlgmac_tx_timer(struct timer_list *t)
{
	struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}

static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
	}
}

static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xlgmac_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xlgmac_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}
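
/* NAPI_POLL_WEIGHT is the stock per-poll budget of 64 packets. With
 * per-channel interrupts every channel gets its own NAPI context serviced
 * by xlgmac_one_poll(); otherwise one context, xlgmac_all_poll(), polls
 * all channels from the shared device interrupt.
 */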

static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}
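
/* Unwind example: if devm_request_irq() fails at i == 2, the err_irq loop
 * starts at i == 1 and releases the channel IRQs for channels 1 and 0;
 * decrementing the unsigned 'i' past zero then wraps to UINT_MAX, which
 * fails the 'i < pdata->channel_count' test and stops the loop before the
 * device-level IRQ is freed.
 */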

static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static int xlgmac_start(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	int ret;

	hw_ops->init(pdata);
	xlgmac_napi_enable(pdata, 1);

	ret = xlgmac_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);
	netif_tx_start_all_queues(netdev);

	return 0;

err_napi:
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	return ret;
}

static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	struct netdev_queue *txq;
	unsigned int i;

	netif_tx_stop_all_queues(netdev);
	xlgmac_stop_timers(pdata);
	hw_ops->disable_tx(pdata);
	hw_ops->disable_rx(pdata);
	xlgmac_free_irqs(pdata);
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}
}

static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xlgmac_stop(pdata);

	xlgmac_free_tx_data(pdata);
	xlgmac_free_rx_data(pdata);

	xlgmac_start(pdata);
}

static void xlgmac_restart(struct work_struct *work)
{
	struct xlgmac_pdata *pdata = container_of(work,
						  struct xlgmac_pdata,
						  restart_work);

	rtnl_lock();

	xlgmac_restart_dev(pdata);

	rtnl_unlock();
}
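
/* The restart runs under the RTNL lock so it cannot race with ndo_open,
 * ndo_stop or other rtnl-protected configuration paths while the device
 * is torn down and brought back up.
 */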

static int xlgmac_open(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;
	int ret;

	desc_ops = &pdata->desc_ops;

	/* TODO: Initialize the phy */

	/* Calculate the Rx buffer size before allocating rings */
	ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		return ret;
	pdata->rx_buf_size = ret;

	/* Allocate the channels and rings */
	ret = desc_ops->alloc_channles_and_rings(pdata);
	if (ret)
		return ret;

	INIT_WORK(&pdata->restart_work, xlgmac_restart);
	xlgmac_init_timers(pdata);

	ret = xlgmac_start(pdata);
	if (ret)
		goto err_channels_and_rings;

	return 0;

err_channels_and_rings:
	desc_ops->free_channels_and_rings(pdata);

	return ret;
}

static int xlgmac_close(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;

	desc_ops = &pdata->desc_ops;

	/* Stop the device */
	xlgmac_stop(pdata);

	/* Free the channels and rings */
	desc_ops->free_channels_and_rings(pdata);

	return 0;
}

static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

static netdev_tx_t xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}
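
/* Return-value contract: NETDEV_TX_BUSY (propagated from
 * xlgmac_maybe_stop_tx_queue()) asks the stack to requeue the skb, which
 * is why that path must not free it; paths that do free the skb with
 * dev_kfree_skb_any() must never return NETDEV_TX_BUSY.
 */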
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) static void xlgmac_get_stats64(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			       struct rtnl_link_stats64 *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	struct xlgmac_pdata *pdata = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	struct xlgmac_stats *pstats = &pdata->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	pdata->hw_ops.read_mmc_stats(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	s->rx_packets = pstats->rxframecount_gb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	s->rx_bytes = pstats->rxoctetcount_gb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	s->rx_errors = pstats->rxframecount_gb -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		       pstats->rxbroadcastframes_g -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		       pstats->rxmulticastframes_g -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		       pstats->rxunicastframes_g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	s->multicast = pstats->rxmulticastframes_g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	s->rx_length_errors = pstats->rxlengtherror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	s->rx_crc_errors = pstats->rxcrcerror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	s->rx_fifo_errors = pstats->rxfifooverflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	s->tx_packets = pstats->txframecount_gb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	s->tx_bytes = pstats->txoctetcount_gb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	s->tx_dropped = netdev->stats.tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
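/* Counter-naming note: in the Synopsys MMC block the "_gb" counters
 * count good and bad frames while the "_g" counters count good
 * frames only, so the subtractions above yield error counts.
 * Illustrative arithmetic (made-up numbers): with
 * rxframecount_gb = 1000 and 990 good unicast + multicast +
 * broadcast frames, rx_errors = 10.
 */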
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	struct xlgmac_pdata *pdata = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	struct sockaddr *saddr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (!is_valid_ether_addr(saddr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	hw_ops->set_mac_address(pdata, netdev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
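/* Usage sketch (illustrative, not part of the driver): the new
 * address arrives from userspace over the rtnetlink path, e.g.
 *
 *	ip link set dev eth0 address 02:11:22:33:44:55
 *
 * "eth0" and the locally administered address are placeholders.
 * is_valid_ether_addr() rejects multicast and all-zero addresses
 * before the hardware filter is reprogrammed.
 */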
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) static int xlgmac_ioctl(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			struct ifreq *ifreq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	if (!netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
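/* Note: beyond checking that the interface is up, every command is
 * accepted as a no-op. A fuller handler would dispatch on cmd; a
 * hypothetical sketch (xlgmac_hwtstamp_get() does not exist in this
 * driver) might look like:
 *
 *	switch (cmd) {
 *	case SIOCGHWTSTAMP:
 *		return xlgmac_hwtstamp_get(netdev, ifreq);
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */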
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	struct xlgmac_pdata *pdata = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	ret = xlgmac_calc_rx_buf_size(netdev, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	pdata->rx_buf_size = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	netdev->mtu = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	xlgmac_restart_dev(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) }
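/* The helper returns a negative errno or the Rx buffer size implied
 * by the new MTU, so ret doubles as the value stored in rx_buf_size;
 * the restart then tears down and rebuilds the rings with the new
 * buffer size. Illustrative usage (placeholder name, subject to the
 * min_mtu/max_mtu limits the core enforces):
 *
 *	ip link set dev eth0 mtu 9000
 */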
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 				  __be16 proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 				  u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	struct xlgmac_pdata *pdata = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	set_bit(vid, pdata->active_vlans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	hw_ops->update_vlan_hash_table(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 				   __be16 proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 				   u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	struct xlgmac_pdata *pdata = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	clear_bit(vid, pdata->active_vlans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	hw_ops->update_vlan_hash_table(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) }
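/* The add/kill pair share one mechanism: active_vlans is a bitmap of
 * the VIDs currently in use, and the hardware VLAN hash filter is
 * recomputed from the whole bitmap on every change, so both paths
 * converge on update_vlan_hash_table(). Illustrative trigger from
 * userspace (placeholder names):
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 */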
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) static void xlgmac_poll_controller(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	struct xlgmac_pdata *pdata = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	struct xlgmac_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (pdata->per_channel_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		channel = pdata->channel_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		for (i = 0; i < pdata->channel_count; i++, channel++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			xlgmac_dma_isr(channel->dma_irq, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		disable_irq(pdata->dev_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		xlgmac_isr(pdata->dev_irq, pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		enable_irq(pdata->dev_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) #endif /* CONFIG_NET_POLL_CONTROLLER */
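/* Netpoll runs when normal interrupt delivery may be unavailable
 * (e.g. netconsole emitting from an oops), so the handlers are
 * invoked synchronously here: per-channel ISRs directly, or the
 * shared ISR bracketed by disable_irq()/enable_irq() to avoid racing
 * a real interrupt. Illustrative consumer (placeholder addresses):
 *
 *	modprobe netconsole netconsole=@/eth0,@192.168.0.2/
 */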
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) static int xlgmac_set_features(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			       netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	struct xlgmac_pdata *pdata = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	if ((features & NETIF_F_RXHASH) && !rxhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		ret = hw_ops->enable_rss(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	else if (!(features & NETIF_F_RXHASH) && rxhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		ret = hw_ops->disable_rss(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if ((features & NETIF_F_RXCSUM) && !rxcsum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		hw_ops->enable_rx_csum(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		hw_ops->disable_rx_csum(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		hw_ops->enable_rx_vlan_stripping(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		hw_ops->disable_rx_vlan_stripping(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		hw_ops->enable_rx_vlan_filtering(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		hw_ops->disable_rx_vlan_filtering(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	pdata->netdev_features = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) }
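/* Only the deltas are applied: each requested bit is compared with
 * the copy cached in pdata->netdev_features, so reasserting an
 * already-active feature touches no hardware. RSS is the one toggle
 * that can fail, and it returns before the cache is updated so a
 * failed request is never recorded as applied. Illustrative triggers
 * (placeholder interface name):
 *
 *	ethtool -K eth0 rxhash off
 *	ethtool -K eth0 rx-vlan-filter on
 */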
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) static void xlgmac_set_rx_mode(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	struct xlgmac_pdata *pdata = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	hw_ops->config_rx_mode(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) }
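/* Invoked by the core when the interface flags or address lists
 * change (promiscuous mode, multicast membership, secondary unicast
 * addresses), with the device's address-list lock held; everything
 * hardware-specific is delegated to config_rx_mode().
 */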
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) static const struct net_device_ops xlgmac_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	.ndo_open		= xlgmac_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	.ndo_stop		= xlgmac_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	.ndo_start_xmit		= xlgmac_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	.ndo_tx_timeout		= xlgmac_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	.ndo_get_stats64	= xlgmac_get_stats64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	.ndo_change_mtu		= xlgmac_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	.ndo_set_mac_address	= xlgmac_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	.ndo_do_ioctl		= xlgmac_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	.ndo_vlan_rx_add_vid	= xlgmac_vlan_rx_add_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	.ndo_vlan_rx_kill_vid	= xlgmac_vlan_rx_kill_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	.ndo_poll_controller	= xlgmac_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	.ndo_set_features	= xlgmac_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	.ndo_set_rx_mode	= xlgmac_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) const struct net_device_ops *xlgmac_get_netdev_ops(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	return &xlgmac_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) }
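/* The accessor keeps the ops table private to this file; the probe
 * path is expected to attach it roughly as follows (simplified
 * sketch of the consumer, not code from this file):
 *
 *	netdev->netdev_ops = xlgmac_get_netdev_ops();
 *
 * .ndo_do_ioctl is the 5.10-era hook name; later kernels moved this
 * role to .ndo_eth_ioctl.
 */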
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	struct xlgmac_pdata *pdata = channel->pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct xlgmac_ring *ring = channel->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct xlgmac_desc_data *desc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	struct xlgmac_desc_ops *desc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	struct xlgmac_hw_ops *hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	desc_ops = &pdata->desc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	hw_ops = &pdata->hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	while (ring->dirty != ring->cur) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		/* Reset desc_data values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		desc_ops->unmap_desc_data(pdata, desc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		ring->dirty++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	/* Make sure everything is written before the register write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	/* Update the Rx Tail Pointer Register with address of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	 * the last cleaned entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	writel(lower_32_bits(desc_data->dma_desc_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) }
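/* Refill note: the loop re-arms every descriptor the Rx path has
 * consumed (dirty trails cur); an allocation failure in
 * map_rx_buffer() simply ends the pass, leaving the remainder for a
 * later refresh. The wmb() orders the descriptor writes ahead of the
 * tail-pointer write, and ring->dirty - 1 is the last slot just
 * handed back to the DMA engine.
 */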
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 					 struct napi_struct *napi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 					 struct xlgmac_desc_data *desc_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 					 unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	unsigned int copy_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	u8 *packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	/* Start with the header buffer which may contain just the header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	 * or the header plus data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 				      desc_data->rx.hdr.dma_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 				      desc_data->rx.hdr.dma_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				      DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	packet = page_address(desc_data->rx.hdr.pa.pages) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		 desc_data->rx.hdr.pa.pages_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	skb_copy_to_linear_data(skb, packet, copy_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	skb_put(skb, copy_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	len -= copy_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		/* Add the remaining data as a frag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		dma_sync_single_range_for_cpu(pdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 					      desc_data->rx.buf.dma_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 					      desc_data->rx.buf.dma_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 					      desc_data->rx.buf.dma_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 					      DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 				desc_data->rx.buf.pa.pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				desc_data->rx.buf.pa.pages_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 				len, desc_data->rx.buf.dma_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		desc_data->rx.buf.pa.pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
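/* Header/data split: the header buffer is always copied into the
 * skb's linear area, while any remainder is attached as a page
 * fragment without copying; clearing rx.buf.pa.pages transfers page
 * ownership to the skb so the refill path maps a fresh page.
 * Illustrative arithmetic (made-up sizes): with hdr_len = 54 and
 * len = 1514, 54 bytes land in the linear area and the remaining
 * 1460 bytes ride in the fragment.
 */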
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static int xlgmac_tx_poll(struct xlgmac_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	struct xlgmac_pdata *pdata = channel->pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	struct xlgmac_ring *ring = channel->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	struct net_device *netdev = pdata->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	unsigned int tx_packets = 0, tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	struct xlgmac_desc_data *desc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct xlgmac_dma_desc *dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	struct xlgmac_desc_ops *desc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	struct xlgmac_hw_ops *hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	int processed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	unsigned int cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	desc_ops = &pdata->desc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	hw_ops = &pdata->hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	/* Nothing to do if there isn't a Tx ring for this channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	if (!ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	cur = ring->cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	/* Be sure we get ring->cur before accessing descriptor data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	txq = netdev_get_tx_queue(netdev, channel->queue_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	       (ring->dirty != cur)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		dma_desc = desc_data->dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		if (!hw_ops->tx_complete(dma_desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		/* Make sure descriptor fields are read after reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		 * the OWN bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		dma_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		if (netif_msg_tx_done(pdata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		if (hw_ops->is_last_desc(dma_desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			tx_packets += desc_data->tx.packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			tx_bytes += desc_data->tx.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		/* Free the SKB and reset the descriptor for re-use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		desc_ops->unmap_desc_data(pdata, desc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		hw_ops->tx_desc_reset(desc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		processed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		ring->dirty++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (!processed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	if ((ring->tx.queue_stopped == 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		ring->tx.queue_stopped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	XLGMAC_PR("processed=%d\n", processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	return processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
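/* Ring accounting sketch: cur and dirty are free-running unsigned
 * counters, so unsigned subtraction stays correct across 2^32
 * wraparound. Illustrative numbers: with dma_desc_count = 128,
 * cur = 130 and dirty = 125, five descriptors are outstanding and
 * 123 are free, which (assuming it exceeds XLGMAC_TX_DESC_MIN_FREE)
 * lets a stopped queue be woken. netdev_tx_completed_queue() is the
 * BQL half paired with netdev_tx_sent_queue() in the xmit path, and
 * XLGMAC_TX_DESC_MAX_PROC bounds how much cleanup one poll may do.
 */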
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	struct xlgmac_pdata *pdata = channel->pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	struct xlgmac_ring *ring = channel->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	struct net_device *netdev = pdata->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	unsigned int len, dma_desc_len, max_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	unsigned int context_next, context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	struct xlgmac_desc_data *desc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	struct xlgmac_pkt_info *pkt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	unsigned int incomplete, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	struct xlgmac_hw_ops *hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	unsigned int received = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	struct napi_struct *napi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	int packet_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	hw_ops = &pdata->hw_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	/* Nothing to do if there isn't a Rx ring for this channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	if (!ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	incomplete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	context_next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	pkt_info = &ring->pkt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	while (packet_count < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		/* First time in the loop, see if we need to restore state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		if (!received && desc_data->state_saved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			skb = desc_data->state.skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			error = desc_data->state.error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			len = desc_data->state.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			memset(pkt_info, 0, sizeof(*pkt_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) read_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 			xlgmac_rx_refresh(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		if (hw_ops->dev_read(channel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		received++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		ring->cur++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		incomplete = XLGMAC_GET_REG_BITS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 					pkt_info->attributes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		context_next = XLGMAC_GET_REG_BITS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 					pkt_info->attributes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		context = XLGMAC_GET_REG_BITS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 					pkt_info->attributes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		/* Earlier error, just drain the remaining data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		if ((incomplete || context_next) && error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			goto read_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		if (error || pkt_info->errors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			if (pkt_info->errors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				netif_err(pdata, rx_err, netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 					  "error in received packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			goto next_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		if (!context) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			/* Length is cumulative; get this descriptor's length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			dma_desc_len = desc_data->rx.len - len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			len += dma_desc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			if (dma_desc_len && !skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 				skb = xlgmac_create_skb(pdata, napi, desc_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 							dma_desc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 				if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 					error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			} else if (dma_desc_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 				dma_sync_single_range_for_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 						pdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 						desc_data->rx.buf.dma_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 						desc_data->rx.buf.dma_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 						desc_data->rx.buf.dma_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 						DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 				skb_add_rx_frag(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 					skb, skb_shinfo(skb)->nr_frags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 					desc_data->rx.buf.pa.pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 					desc_data->rx.buf.pa.pages_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 					dma_desc_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 					desc_data->rx.buf.dma_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 				desc_data->rx.buf.pa.pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		if (incomplete || context_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			goto read_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 			goto next_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		/* Be sure we don't exceed the configured MTU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		max_len = netdev->mtu + ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		    (skb->protocol == htons(ETH_P_8021Q)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			max_len += VLAN_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		if (skb->len > max_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			netif_err(pdata, rx_err, netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 				  "packet length exceeds configured MTU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			goto next_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		if (netif_msg_pktdata(pdata))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			xlgmac_print_pkt(netdev, skb, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 			skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 					       pkt_info->vlan_ctag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			pdata->stats.rx_vlan_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			skb_set_hash(skb, pkt_info->rss_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 				     pkt_info->rss_hash_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		skb->dev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		skb->protocol = eth_type_trans(skb, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		skb_record_rx_queue(skb, channel->queue_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		napi_gro_receive(napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) next_packet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		packet_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	/* Check if we need to save state before leaving */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (received && (incomplete || context_next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		desc_data->state_saved = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		desc_data->state.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		desc_data->state.len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		desc_data->state.error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	XLGMAC_PR("packet_count = %d\n", packet_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	return packet_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
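/* A frame can span several descriptors (and a context descriptor can
 * follow it), so one skb may be assembled across loop iterations and
 * even across NAPI polls: when the budget runs out mid-frame, the
 * partial skb, length and error flag are parked in desc_data->state
 * and restored by the !received branch at the top of the next poll.
 */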
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static int xlgmac_one_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	struct xlgmac_channel *channel = container_of(napi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 						struct xlgmac_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 						napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	int processed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	XLGMAC_PR("budget=%d\n", budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	/* Cleanup Tx ring first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	xlgmac_tx_poll(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	/* Process Rx ring next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	processed = xlgmac_rx_poll(channel, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	/* If we processed everything, we are done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (processed < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		/* Turn off polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		napi_complete_done(napi, processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		/* Enable Tx and Rx interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		enable_irq(channel->dma_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	XLGMAC_PR("received = %d\n", processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	return processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
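/* Per-channel NAPI flavor: Tx cleanup is unconditional and does not
 * count against the budget; only Rx packets do, which is what keeps
 * polling alive on a full-budget return. The enable_irq() re-arms
 * the channel's dedicated DMA interrupt, which is expected to have
 * been masked when this NAPI instance was scheduled.
 */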
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static int xlgmac_all_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	struct xlgmac_pdata *pdata = container_of(napi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 						   struct xlgmac_pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 						   napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	struct xlgmac_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	int processed, last_processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	int ring_budget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	XLGMAC_PR("budget=%d\n", budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	processed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	ring_budget = budget / pdata->rx_ring_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		last_processed = processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		channel = pdata->channel_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		for (i = 0; i < pdata->channel_count; i++, channel++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			/* Cleanup Tx ring first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			xlgmac_tx_poll(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			/* Process Rx ring next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			if (ring_budget > (budget - processed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				ring_budget = budget - processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			processed += xlgmac_rx_poll(channel, ring_budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	} while ((processed < budget) && (processed != last_processed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	/* If we processed everything, we are done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	if (processed < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		/* Turn off polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		napi_complete_done(napi, processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		/* Enable Tx and Rx interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		xlgmac_enable_rx_tx_ints(pdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	XLGMAC_PR("received = %d\n", processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	return processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
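/* Shared-IRQ NAPI flavor: the budget is divided evenly among the Rx
 * rings (illustrative arithmetic: budget = 64 across 4 rings gives
 * ring_budget = 16), and the do/while re-walks all channels until
 * either the budget is spent or a pass makes no progress, so slack
 * left by idle rings is redistributed to busy ones.
 */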