Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
#define XRX200_DMA_DATA_LEN	0x600
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1

/* cpu port mac */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to Packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)

struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;
	struct sk_buff *skb[LTQ_DESC_NUM];

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    XRX200_DMA_DATA_LEN;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}

static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate the receiving of frames
	 * on the ports and then some packets queue up in the PPE buffers.
	 * They already passed the PMAC so they do not have the tags
	 * configured here. Read these packets here and drop them.
	 * The HW should have written them into memory after 10us.
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}

static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	dma_addr_t mapping;
	int ret = 0;

	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
							  XRX200_DMA_DATA_LEN);
	if (!ch->skb[ch->dma.desc]) {
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
				 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
		ch->skb[ch->dma.desc] = skb;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping;
	/* Make sure the address is written before we give it to HW */
	wmb();
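	/* The descriptor is handed back to the hardware in both the success
	 * and failure paths; on failure the previously programmed buffer
	 * address is left unchanged.
	 */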
skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
		XRX200_DMA_DATA_LEN;

	return ret;
}

static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	int ret;

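	/* Refill this ring slot with a fresh buffer before handing the
	 * received skb to the stack; if the refill fails the frame is
	 * dropped below.
	 */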
	ret = xrx200_alloc_skb(ch);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, net_dev);
	netif_receive_skb(skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += len;

	return 0;
}

static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret)
				return ret;
			rx++;
		} else {
			break;
		}
	}

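	/* Less work than the budget means the ring is drained; complete NAPI
	 * and re-enable the channel interrupt.
	 */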
	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}

static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}

static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to start on a 16 byte aligned address */
	byte_offset = mapping % 16;

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
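	/* The ring is full when the producer index catches up with the
	 * housekeeping index; stop the queue until descriptors are freed.
	 */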
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open		= xrx200_open,
	.ndo_stop		= xrx200_close,
	.ndo_start_xmit		= xrx200_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

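	/* Mask the channel interrupt while NAPI polls the ring; the poll
	 * functions re-enable it once they run out of work.
	 */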
	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}

static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_skb(ch_rx);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			dev_kfree_skb_any(priv->chan_rx.skb[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		dev_kfree_skb_any(priv->chan_rx.skb[i]);
}

static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct resource *res;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	const u8 *mac;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
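	/* The MTU range is bounded by the minimum Ethernet frame size and
	 * the size of a single DMA buffer.
	 */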
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN;

	/* load the memory ranges */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get resources\n");
		return -ENOENT;
	}

	priv->pmac_reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->pmac_reg)) {
		dev_err(dev, "failed to request and remap io ranges\n");
		return PTR_ERR(priv->pmac_reg);
	}

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	mac = of_get_mac_address(np);
	if (!IS_ERR(mac))
		ether_addr_copy(net_dev->dev_addr, mac);
	else
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set IPG to 12 */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
	netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");