Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

niu.c (Sun Neptune ethernet driver), as of commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300):
// SPDX-License-Identifier: GPL-2.0
/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

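/* Fallback 64-bit MMIO accessors for platforms that do not define
 * readq/writeq natively.  The two 32-bit halves are not accessed
 * atomically, so the hardware must tolerate split (low word first)
 * accesses for this to be safe.
 */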
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

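/* PCI IDs this driver binds to: the Sun Neptune NIU. */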
static const struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

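/* Register accessors.  These macros expect a "struct niu *np" in
 * scope.  The IPP, PCS and XPCS variants add per-port block offsets
 * cached in the niu structure (np->ipp_off and friends) to the base
 * register area, while the MAC variant uses its own mapped region.
 */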
#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

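/* "debug" is the usual netif message-level module parameter; the -1
 * default is presumably resolved against NIU_MSG_DEFAULT when the
 * device is probed.
 */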
static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

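/* The parent structure is shared by the ports of one NIU device, so
 * its lock is taken with interrupts disabled.
 */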
#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

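/* Poll "reg" until all of "bits" read back as zero, making at most
 * "limit" reads spaced "delay" microseconds apart; -ENODEV means the
 * bits never cleared.  The _mac and _ipp variants differ only in
 * which register block they touch, and the set_and_wait helpers
 * first write the bits (self-clearing commands) before polling.
 */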
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

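/* Re-arm a logical device group: rewrite the group's interrupt timer
 * into LDG_IMGMT, with the ARM bit set when enabling.
 */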
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

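/* Mask or unmask one logical device's interrupt.  LDNs below 64 are
 * controlled through LD_IM0, the remainder through LD_IM1.
 */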
static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

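/* The parent's port configuration packs a 2-bit PHY type per port
 * into a single u32; these helpers encode and decode one entry.
 */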
static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

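/* Wait for the MIF to finish a management frame: poll
 * MIF_FRAME_OUTPUT until the turnaround bit is seen, then return the
 * 16-bit data field, or -ENODEV if the bus never responds.
 */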
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

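/* MDIO accesses follow the IEEE 802.3 Clause 45 pattern: an address
 * frame selects (device, register), then a separate read or write
 * frame transfers the data.  The Clause 22 style mii_read/mii_write
 * below issue a single frame instead.
 */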
static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

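/* The ESR2 (TI PLL) registers are 32 bits wide but MDIO moves only
 * 16 bits per frame, so every value is split into _L and _H halves.
 */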
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

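/* Bring the SERDES up at 1G: program the PLL for an 8x multiplier
 * with half-rate lanes, configure all four lanes, then wait for the
 * port's ready and signal-detect bits in ESR_INT_SIGNALS.
 */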
static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

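/* Bring the SERDES up at 10G (10x PLL multiplier).  If the link
 * signal bits never settle, retry the bring-up at 1G and clear
 * NIU_FLAGS_10G to match.
 */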
static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

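/* ESR register reads: mdio_read() returns the 16-bit value on
 * success (or a negative errno), so each 32-bit register is
 * reassembled from its _L and _H halves.
 */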
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

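/* Pulse the RX/TX reset controls across all lanes, then verify that
 * the reset register reads back as zero.
 */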
static int esr_reset(struct niu *np)
{
	u32 reset;
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}

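/* 10G bring-up for the on-chip SERDES: program the per-port control
 * and test-config registers, condition each lane's RX/TX and glue
 * settings, reset the ESR, then check the link signal bits.
 */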
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   754) static int serdes_init_10g(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   756) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   757) 	unsigned long ctrl_reg, test_cfg_reg, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   758) 	u64 ctrl_val, test_cfg_val, sig, mask, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   759) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   761) 	switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   762) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   763) 		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   764) 		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   765) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   766) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   767) 		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   768) 		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   769) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   771) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   772) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   773) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   774) 	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   775) 		    ENET_SERDES_CTRL_SDET_1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   776) 		    ENET_SERDES_CTRL_SDET_2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   777) 		    ENET_SERDES_CTRL_SDET_3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   778) 		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   779) 		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   780) 		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   781) 		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   782) 		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   783) 		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   784) 		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   785) 		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   786) 	test_cfg_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   788) 	if (lp->loopback_mode == LOOPBACK_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   789) 		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   790) 				  ENET_SERDES_TEST_MD_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   791) 				 (ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   792) 				  ENET_SERDES_TEST_MD_1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   793) 				 (ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   794) 				  ENET_SERDES_TEST_MD_2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   795) 				 (ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   796) 				  ENET_SERDES_TEST_MD_3_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   797) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   799) 	nw64(ctrl_reg, ctrl_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   800) 	nw64(test_cfg_reg, test_cfg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   802) 	/* Initialize all 4 lanes of the SERDES.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   803) 	for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   804) 		u32 rxtx_ctrl, glue0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   806) 		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   807) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   808) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   809) 		err = esr_read_glue0(np, i, &glue0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   810) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   811) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   813) 		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   814) 		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   815) 			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   817) 		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   818) 			   ESR_GLUE_CTRL0_THCNT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   819) 			   ESR_GLUE_CTRL0_BLTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   820) 		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   821) 			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   822) 			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   823) 			  (BLTIME_300_CYCLES <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   824) 			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   826) 		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   827) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   828) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   829) 		err = esr_write_glue0(np, i, glue0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   830) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   831) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   834) 	err = esr_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   835) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   836) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   837) 
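	/* The ESR interrupt-signal register latches the per-lane signal
	 * detect and sync-ready bits.  Check that the full set came up for
	 * this port; for a hot-pluggable PHY a mismatch presumably just
	 * means the module is absent, so it is recorded rather than
	 * treated as a failure.
	 */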
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   838) 	sig = nr64(ESR_INT_SIGNALS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   839) 	switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   840) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   841) 		mask = ESR_INT_SIGNALS_P0_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   842) 		val = (ESR_INT_SRDY0_P0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   843) 		       ESR_INT_DET0_P0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   844) 		       ESR_INT_XSRDY_P0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   845) 		       ESR_INT_XDP_P0_CH3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   846) 		       ESR_INT_XDP_P0_CH2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   847) 		       ESR_INT_XDP_P0_CH1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   848) 		       ESR_INT_XDP_P0_CH0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   849) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   851) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   852) 		mask = ESR_INT_SIGNALS_P1_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   853) 		val = (ESR_INT_SRDY0_P1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   854) 		       ESR_INT_DET0_P1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   855) 		       ESR_INT_XSRDY_P1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   856) 		       ESR_INT_XDP_P1_CH3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   857) 		       ESR_INT_XDP_P1_CH2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   858) 		       ESR_INT_XDP_P1_CH1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   859) 		       ESR_INT_XDP_P1_CH0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   860) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   862) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   863) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   864) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   866) 	if ((sig & mask) != val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   867) 		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   868) 			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   869) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   870) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   871) 		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   872) 			   np->port, (int)(sig & mask), (int)val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   873) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   875) 	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   876) 		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   877) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   879) 
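/* serdes_init_1g() only retunes the SERDES PLL for 1G operation:
 * clear the feedback-divider bit and set this port's half-rate bit.
 * All four ports program the single ENET_SERDES_1_PLL_CFG register,
 * each touching only its own HRATE flag.
 */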
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   880) static int serdes_init_1g(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   882) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   884) 	val = nr64(ENET_SERDES_1_PLL_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   885) 	val &= ~ENET_SERDES_PLL_FBDIV2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   886) 	switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   887) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   888) 		val |= ENET_SERDES_PLL_HRATE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   889) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   890) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   891) 		val |= ENET_SERDES_PLL_HRATE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   892) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   893) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   894) 		val |= ENET_SERDES_PLL_HRATE2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   895) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   896) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   897) 		val |= ENET_SERDES_PLL_HRATE3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   898) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   899) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   900) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   902) 	nw64(ENET_SERDES_1_PLL_CFG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   904) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   906) 
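/* Full 1G SERDES bring-up, apparently for links that are SERDES all
 * the way rather than behind an external PHY: pulse the per-port
 * reset, program the PLL, control and test-config registers, retune
 * the four lanes, then verify the signal-detect bits.  The flow
 * mirrors serdes_init_10g() with a 1G PLL configuration.
 */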
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   907) static int serdes_init_1g_serdes(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   909) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   910) 	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   911) 	u64 ctrl_val, test_cfg_val, sig, mask, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   912) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   913) 	u64 reset_val, val_rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   915) 	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   916) 		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   917) 		ENET_SERDES_PLL_FBDIV0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   918) 	switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   919) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   920) 		reset_val = ENET_SERDES_RESET_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   921) 		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   922) 		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   923) 		pll_cfg = ENET_SERDES_0_PLL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   924) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   925) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   926) 		reset_val = ENET_SERDES_RESET_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   927) 		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   928) 		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   929) 		pll_cfg = ENET_SERDES_1_PLL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   930) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   932) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   933) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   935) 	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   936) 		    ENET_SERDES_CTRL_SDET_1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   937) 		    ENET_SERDES_CTRL_SDET_2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   938) 		    ENET_SERDES_CTRL_SDET_3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   939) 		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   940) 		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   941) 		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   942) 		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   943) 		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   944) 		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   945) 		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   946) 		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   947) 	test_cfg_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   949) 	if (lp->loopback_mode == LOOPBACK_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   950) 		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   951) 				  ENET_SERDES_TEST_MD_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   952) 				 (ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   953) 				  ENET_SERDES_TEST_MD_1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   954) 				 (ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   955) 				  ENET_SERDES_TEST_MD_2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   956) 				 (ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   957) 				  ENET_SERDES_TEST_MD_3_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   959) 
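	/* Reset pulse: assert this port's reset bit, wait 20ms, program
	 * the PLL/control/test configuration, then deassert the reset.
	 * The 2s delay that follows presumably covers PLL lock and lane
	 * synchronization.
	 */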
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   960) 	nw64(ENET_SERDES_RESET, reset_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   961) 	mdelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   962) 	val_rd = nr64(ENET_SERDES_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   963) 	val_rd &= ~reset_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   964) 	nw64(pll_cfg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   965) 	nw64(ctrl_reg, ctrl_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   966) 	nw64(test_cfg_reg, test_cfg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   967) 	nw64(ENET_SERDES_RESET, val_rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   968) 	mdelay(2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   970) 	/* Initialize all 4 lanes of the SERDES.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   971) 	for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   972) 		u32 rxtx_ctrl, glue0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   974) 		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   975) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   976) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   977) 		err = esr_read_glue0(np, i, &glue0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   978) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   979) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   981) 		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   982) 		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   983) 			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   985) 		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   986) 			   ESR_GLUE_CTRL0_THCNT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   987) 			   ESR_GLUE_CTRL0_BLTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   988) 		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   989) 			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   990) 			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   991) 			  (BLTIME_300_CYCLES <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   992) 			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   994) 		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   995) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   996) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   997) 		err = esr_write_glue0(np, i, glue0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   998) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   999) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1003) 	sig = nr64(ESR_INT_SIGNALS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1004) 	switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1005) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1006) 		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1007) 		mask = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1008) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1010) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1011) 		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1012) 		mask = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1013) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1015) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1016) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1017) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1019) 	if ((sig & mask) != val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1020) 		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1021) 			   np->port, (int)(sig & mask), (int)val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1022) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1025) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1027) 
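/* 1G SERDES link state comes straight from the PCS MII status
 * register; when the link bit is set, 1000/full is implied rather
 * than negotiated.
 */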
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1028) static int link_status_1g_serdes(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1030) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1031) 	int link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1032) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1033) 	u16 current_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1034) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1035) 	u8 current_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1037) 	link_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1038) 	current_speed = SPEED_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1039) 	current_duplex = DUPLEX_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1041) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1043) 	val = nr64_pcs(PCS_MII_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1045) 	if (val & PCS_MII_STAT_LINK_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1046) 		link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1047) 		current_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1048) 		current_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1049) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1051) 	lp->active_speed = current_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1052) 	lp->active_duplex = current_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1053) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1055) 	*link_up_p = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1056) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1058) 
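/* 10G SERDES link state: the XPCS status register must show bit
 * 0x1000 set and XMAC_INTER2 must have bit 0x01000000 clear (the
 * latter appears to be a fault indication).  Non-10G configurations
 * fall back to the 1G variant above.
 */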
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1059) static int link_status_10g_serdes(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1061) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1062) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1063) 	int link_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1064) 	int link_ok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1065) 	u64 val, val2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1066) 	u16 current_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1067) 	u8 current_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1069) 	if (!(np->flags & NIU_FLAGS_10G))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1070) 		return link_status_1g_serdes(np, link_up_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1072) 	current_speed = SPEED_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1073) 	current_duplex = DUPLEX_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1074) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1076) 	val = nr64_xpcs(XPCS_STATUS(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1077) 	val2 = nr64_mac(XMAC_INTER2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1078) 	if (val2 & 0x01000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1079) 		link_ok = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1081) 	if ((val & 0x1000ULL) && link_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1082) 		link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1083) 		current_speed = SPEED_10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1084) 		current_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1086) 	lp->active_speed = current_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1087) 	lp->active_duplex = current_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1088) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1089) 	*link_up_p = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1090) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1092) 
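/* Standard clause-22 link resolution over MII: snapshot BMCR/BMSR/
 * advertisement/LPA (plus the 1000BASE-T registers when extended
 * status is present), derive the supported/advertised masks, then
 * resolve speed and duplex either from the autoneg result or from the
 * forced BMCR bits.  The "(ctrl1000 << 2) & stat1000" trick works
 * because the local 1000BASE-T advertisement bits sit two positions
 * below the link-partner bits, e.g. ADVERTISE_1000FULL (0x0200) << 2
 * == LPA_1000FULL (0x0800).
 */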
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1093) static int link_status_mii(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1095) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1096) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1097) 	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1098) 	int supported, advertising, active_speed, active_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1100) 	err = mii_read(np, np->phy_addr, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1101) 	if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1102) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1103) 	bmcr = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1105) 	err = mii_read(np, np->phy_addr, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1106) 	if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1107) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1108) 	bmsr = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1110) 	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1111) 	if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1112) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1113) 	advert = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1115) 	err = mii_read(np, np->phy_addr, MII_LPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1116) 	if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1117) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1118) 	lpa = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1120) 	if (likely(bmsr & BMSR_ESTATEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1121) 		err = mii_read(np, np->phy_addr, MII_ESTATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1122) 		if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1123) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1124) 		estatus = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1126) 		err = mii_read(np, np->phy_addr, MII_CTRL1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1127) 		if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1128) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1129) 		ctrl1000 = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1131) 		err = mii_read(np, np->phy_addr, MII_STAT1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1132) 		if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1133) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1134) 		stat1000 = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1135) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1136) 		estatus = ctrl1000 = stat1000 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1138) 	supported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1139) 	if (bmsr & BMSR_ANEGCAPABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1140) 		supported |= SUPPORTED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1141) 	if (bmsr & BMSR_10HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1142) 		supported |= SUPPORTED_10baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1143) 	if (bmsr & BMSR_10FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1144) 		supported |= SUPPORTED_10baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1145) 	if (bmsr & BMSR_100HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1146) 		supported |= SUPPORTED_100baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1147) 	if (bmsr & BMSR_100FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1148) 		supported |= SUPPORTED_100baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1149) 	if (estatus & ESTATUS_1000_THALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1150) 		supported |= SUPPORTED_1000baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1151) 	if (estatus & ESTATUS_1000_TFULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1152) 		supported |= SUPPORTED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1153) 	lp->supported = supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1155) 	advertising = mii_adv_to_ethtool_adv_t(advert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1156) 	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1158) 	if (bmcr & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1159) 		int neg, neg1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1161) 		lp->active_autoneg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1162) 		advertising |= ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1164) 		neg = advert & lpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1165) 		neg1000 = (ctrl1000 << 2) & stat1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1167) 		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1168) 			active_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1169) 		else if (neg & LPA_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1170) 			active_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1171) 		else if (neg & (LPA_10HALF | LPA_10FULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1172) 			active_speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1173) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1174) 			active_speed = SPEED_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1176) 		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1177) 			active_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1178) 		else if (active_speed != SPEED_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1179) 			active_duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1180) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1181) 			active_duplex = DUPLEX_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1182) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1183) 		lp->active_autoneg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1185) 		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1186) 			active_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1187) 		else if (bmcr & BMCR_SPEED100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1188) 			active_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1189) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1190) 			active_speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1192) 		if (bmcr & BMCR_FULLDPLX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1193) 			active_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1194) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1195) 			active_duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1196) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1198) 	lp->active_advertising = advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1199) 	lp->active_speed = active_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1200) 	lp->active_duplex = active_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1201) 	*link_up_p = !!(bmsr & BMSR_LSTATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1203) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1205) 
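/* RGMII 1G link check: a single BMSR read under the device lock, with
 * link-up reported as a fixed 1000/full.
 */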
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1206) static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1208) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1209) 	u16 current_speed, bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1210) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1211) 	u8 current_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1212) 	int err, link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1214) 	link_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1215) 	current_speed = SPEED_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1216) 	current_duplex = DUPLEX_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1218) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1220) 	err = mii_read(np, np->phy_addr, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1221) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1222) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1224) 	bmsr = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1225) 	if (bmsr & BMSR_LSTATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1226) 		link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1227) 		current_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1228) 		current_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1230) 	lp->active_speed = current_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1231) 	lp->active_duplex = current_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1232) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1234) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1235) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1237) 	*link_up_p = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1238) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1240) 
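/* Runs the MII resolution above under np->lock and tags the result as
 * a twisted-pair port.
 */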
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1241) static int link_status_1g(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1243) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1244) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1245) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1247) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1249) 	err = link_status_mii(np, link_up_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1250) 	lp->supported |= SUPPORTED_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1251) 	lp->active_advertising |= ADVERTISED_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1253) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1254) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1256) 
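/* Soft-reset the BCM870x through the PHYXS BMCR and poll (up to 1000
 * reads) for the self-clearing reset bit.  A read of all ones is
 * handed back to the caller as-is, presumably because it means no PHY
 * answered on the bus.
 */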
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1257) static int bcm8704_reset(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1259) 	int err, limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1261) 	err = mdio_read(np, np->phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1262) 			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1263) 	if (err < 0 || err == 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1264) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1265) 	err |= BMCR_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1266) 	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1267) 			 MII_BMCR, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1268) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1269) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1271) 	limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1272) 	while (--limit >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1273) 		err = mdio_read(np, np->phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1274) 				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1275) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1276) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1277) 		if (!(err & BMCR_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1278) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1280) 	if (limit < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1281) 		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1282) 			   np->port, (err & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1283) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1285) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1288) /* When written, certain PHY registers need to be read back twice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1289)  * in order for the bits to settle properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1290)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1291) static int bcm8704_user_dev3_readback(struct niu *np, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1293) 	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1294) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1295) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1296) 	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1297) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1298) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1299) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1301) 
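/* The BCM8706 variant of the user-device-3 init only reprograms the
 * optical-module digital-control register: route the GPIOs and set a
 * reserved bit, then give the module a second to settle.
 */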
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1302) static int bcm8706_init_user_dev3(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1304) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1307) 	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1308) 			BCM8704_USER_OPT_DIGITAL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1309) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1310) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1311) 	err &= ~USER_ODIG_CTRL_GPIOS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1312) 	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1313) 	err |= USER_ODIG_CTRL_RESV2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1314) 	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1315) 			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1316) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1317) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1319) 	mdelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1321) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1323) 
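/* Full BCM8704 user-device-3 init: program the *_LVL level selects in
 * USER_CONTROL and the XFP clocking in PMD_TX_CONTROL, perform the
 * double readback described above so the bits settle, then route the
 * GPIOs in the optical digital-control register.
 */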
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1324) static int bcm8704_init_user_dev3(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1326) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1328) 	err = mdio_write(np, np->phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1329) 			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1330) 			 (USER_CONTROL_OPTXRST_LVL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1331) 			  USER_CONTROL_OPBIASFLT_LVL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1332) 			  USER_CONTROL_OBTMPFLT_LVL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1333) 			  USER_CONTROL_OPPRFLT_LVL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1334) 			  USER_CONTROL_OPTXFLT_LVL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1335) 			  USER_CONTROL_OPRXLOS_LVL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1336) 			  USER_CONTROL_OPRXFLT_LVL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1337) 			  USER_CONTROL_OPTXON_LVL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1338) 			  (0x3f << USER_CONTROL_RES1_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1339) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1340) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1342) 	err = mdio_write(np, np->phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1343) 			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1344) 			 (USER_PMD_TX_CTL_XFP_CLKEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1345) 			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1346) 			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1347) 			  USER_PMD_TX_CTL_TSCK_LPWREN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1348) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1349) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1351) 	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1352) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1353) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1354) 	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1355) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1356) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1358) 	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1359) 			BCM8704_USER_OPT_DIGITAL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1360) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1361) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1362) 	err &= ~USER_ODIG_CTRL_GPIOS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1363) 	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1364) 	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1365) 			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1366) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1367) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1369) 	mdelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1371) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1373) 
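/* Marvell 88X2011 LED helpers: read-modify-write the LED control
 * registers in user device 2.  This one repoints the activity LED's
 * function field to the requested value.
 */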
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1374) static int mrvl88x2011_act_led(struct niu *np, int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1376) 	int	err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1378) 	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1379) 			MRVL88X2011_LED_8_TO_11_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1380) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1381) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1383) 	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1384) 	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1386) 	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1387) 			  MRVL88X2011_LED_8_TO_11_CTL, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1389) 
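/* The blink-rate field sits at bit 4 of MRVL88X2011_LED_BLINK_CTL,
 * hence the "rate << 4" below.
 */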
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1390) static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1392) 	int	err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1394) 	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1395) 			MRVL88X2011_LED_BLINK_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1396) 	if (err >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1397) 		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1398) 		err |= (rate << 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1400) 		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1401) 				 MRVL88X2011_LED_BLINK_CTL, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1404) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1406) 
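/* 88X2011 bring-up order: LED blink rate, activity LED off, XFP
 * reference-clock enable in the general control register, optional
 * PMA/PMD loopback for LOOPBACK_MAC, and finally un-gating the PMD
 * transmitter.
 */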
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1407) static int xcvr_init_10g_mrvl88x2011(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1409) 	int	err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1411) 	/* Set LED functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1412) 	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1413) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1414) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1416) 	/* LED activity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1417) 	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1418) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1419) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1421) 	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1422) 			MRVL88X2011_GENERAL_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1423) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1424) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1426) 	err |= MRVL88X2011_ENA_XFPREFCLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1428) 	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1429) 			 MRVL88X2011_GENERAL_CTL, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1430) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1431) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1433) 	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1434) 			MRVL88X2011_PMA_PMD_CTL_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1435) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1436) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1438) 	if (np->link_config.loopback_mode == LOOPBACK_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1439) 		err |= MRVL88X2011_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1440) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1441) 		err &= ~MRVL88X2011_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1443) 	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1444) 			 MRVL88X2011_PMA_PMD_CTL_1, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1445) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1446) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1448) 	/* Enable PMD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1449) 	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1450) 			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1452) 
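/* Diagnostic dump for the BCM870x: the #if 1 block below prints a few
 * PHYXS/PMA status registers, then the analog status and TX alarm
 * registers are double-read (see the readback note above) and matched
 * against magic signatures: 0x03fc is healthy, 0x43bc plus a TX alarm
 * is reported as a cable fault, and 0x639c as a bad or missing
 * optical module.
 */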
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1454) static int xcvr_diag_bcm870x(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1456) 	u16 analog_stat0, tx_alarm_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1457) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1459) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1460) 	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1461) 			MII_STAT1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1462) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1463) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1464) 	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1466) 	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1467) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1468) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1469) 	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1471) 	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1472) 			MII_NWAYTEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1473) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1474) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1475) 	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1476) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1478) 	/* XXX dig this out it might not be so useful XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1479) 	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1480) 			BCM8704_USER_ANALOG_STATUS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1481) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1482) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1483) 	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1484) 			BCM8704_USER_ANALOG_STATUS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1485) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1486) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1487) 	analog_stat0 = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1489) 	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1490) 			BCM8704_USER_TX_ALARM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1491) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1492) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1493) 	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1494) 			BCM8704_USER_TX_ALARM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1495) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1496) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1497) 	tx_alarm_status = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1499) 	if (analog_stat0 != 0x03fc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1500) 		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1501) 			pr_info("Port %u cable not connected or bad cable\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1502) 				np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1503) 		} else if (analog_stat0 == 0x639c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1504) 			pr_info("Port %u optical module is bad or missing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1505) 				np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1506) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1509) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1511) 
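/* MAC loopback on the BCM870x lives in the PHY's PCS device:
 * read-modify-write BMCR_LOOPBACK according to the configured mode.
 */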
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1512) static int xcvr_10g_set_lb_bcm870x(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1514) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1515) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1517) 	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1518) 			MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1519) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1520) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1522) 	err &= ~BMCR_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1524) 	if (lp->loopback_mode == LOOPBACK_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1525) 		err |= BMCR_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1527) 	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1528) 			 MII_BMCR, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1529) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1530) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1532) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1534) 
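/* BCM8706 transceiver init.  Returns early (and successfully) when a
 * hot-pluggable PHY is configured but absent.  Forces the link LED on
 * and switches the MIF to indirect mode before resetting and
 * programming the PHY.
 */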
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1535) static int xcvr_init_10g_bcm8706(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1537) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1538) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1540) 	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1541) 	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1542) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1544) 	val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1545) 	val &= ~XMAC_CONFIG_LED_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1546) 	val |= XMAC_CONFIG_FORCE_LED_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1547) 	nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1549) 	val = nr64(MIF_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1550) 	val |= MIF_CONFIG_INDIRECT_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1551) 	nw64(MIF_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1553) 	err = bcm8704_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1554) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1555) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1557) 	err = xcvr_10g_set_lb_bcm870x(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1558) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1559) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1561) 	err = bcm8706_init_user_dev3(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1562) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1563) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1565) 	err = xcvr_diag_bcm870x(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1566) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1567) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1569) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1571) 
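/* The BCM8704 path runs the same building blocks, minus the
 * hotplug/LED/MIF preamble and with the user-device-3 programming
 * done before the loopback setup rather than after.
 */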
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1572) static int xcvr_init_10g_bcm8704(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1574) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1576) 	err = bcm8704_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1577) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1578) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1580) 	err = bcm8704_init_user_dev3(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1581) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1582) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1584) 	err = xcvr_10g_set_lb_bcm870x(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1585) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1586) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1588) 	err = xcvr_diag_bcm870x(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1589) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1590) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1592) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1594) 
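/* Common 10G transceiver entry point: force the link LED on, put the
 * MIF into indirect mode, look up this port's PHY ID in the probe
 * table, and dispatch to the Marvell or (by default) Broadcom init.
 */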
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1595) static int xcvr_init_10g(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1597) 	int phy_id, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1598) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1600) 	val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1601) 	val &= ~XMAC_CONFIG_LED_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1602) 	val |= XMAC_CONFIG_FORCE_LED_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1603) 	nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1605) 	/* XXX shared resource, lock parent XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1606) 	val = nr64(MIF_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1607) 	val |= MIF_CONFIG_INDIRECT_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1608) 	nw64(MIF_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1610) 	phy_id = phy_decode(np->parent->port_phy, np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1611) 	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1613) 	/* handle different phy types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1614) 	switch (phy_id & NIU_PHY_ID_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1615) 	case NIU_PHY_ID_MRVL88X2011:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1616) 		err = xcvr_init_10g_mrvl88x2011(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1617) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1619) 	default: /* bcom 8704 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1620) 		err = xcvr_init_10g_bcm8704(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1621) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1624) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1626) 
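/* BMCR_RESET is self-clearing: write it, then poll (up to 1000 reads
 * spaced 500us apart, roughly half a second) until the PHY drops the
 * bit.  A read failure or a timeout is fatal for this port.
 */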
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1627) static int mii_reset(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1629) 	int limit, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1631) 	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1632) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1633) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1635) 	limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1636) 	while (--limit >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1637) 		udelay(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1638) 		err = mii_read(np, np->phy_addr, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1639) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1640) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1641) 		if (!(err & BMCR_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1642) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1644) 	if (limit < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1645) 		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1646) 			   np->port, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1647) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1650) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1652) 
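/* RGMII variant of the 1G bring-up.  The MIF is put into direct MII
 * mode, the PHY is reset, and the link is then forced to 1000/full
 * with autonegotiation disabled (BMCR_ANENABLE is never set here).
 * 1000BASE-T full is advertised in CTRL1000 only when the PHY's
 * extended status says it is supported; the trailing BMCR/BMSR reads
 * are read-backs of the forced configuration.
 */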
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1653) static int xcvr_init_1g_rgmii(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1655) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1656) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1657) 	u16 bmcr, bmsr, estat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1659) 	val = nr64(MIF_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1660) 	val &= ~MIF_CONFIG_INDIRECT_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1661) 	nw64(MIF_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1663) 	err = mii_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1664) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1665) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1667) 	err = mii_read(np, np->phy_addr, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1668) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1669) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1670) 	bmsr = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1672) 	estat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1673) 	if (bmsr & BMSR_ESTATEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1674) 		err = mii_read(np, np->phy_addr, MII_ESTATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1675) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1676) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1677) 		estat = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1680) 	bmcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1681) 	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1682) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1683) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1685) 	if (bmsr & BMSR_ESTATEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1686) 		u16 ctrl1000 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1688) 		if (estat & ESTATUS_1000_TFULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1689) 			ctrl1000 |= ADVERTISE_1000FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1690) 		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1691) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1692) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1695) 	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1697) 	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1698) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1699) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1701) 	err = mii_read(np, np->phy_addr, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1702) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1703) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1704) 	bmcr = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1706) 	err = mii_read(np, np->phy_addr, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1707) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1708) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1710) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1712) 
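/* Shared 1G MII bring-up.  After a reset the PHY's capabilities are
 * sampled from BMSR (and ESTATUS when present), and then one of three
 * configurations is applied: loopback (BMCR internal loopback for
 * LOOPBACK_MAC, Broadcom 5464R external loopback for LOOPBACK_PHY),
 * autonegotiation with the advertisement filtered by both the PHY's
 * capabilities and lp->advertising, or a forced speed/duplex that is
 * rejected with -EINVAL when the PHY cannot do it.
 */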
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1713) static int mii_init_common(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1715) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1716) 	u16 bmcr, bmsr, adv, estat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1717) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1719) 	err = mii_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1720) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1721) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1723) 	err = mii_read(np, np->phy_addr, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1724) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1725) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1726) 	bmsr = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1728) 	estat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1729) 	if (bmsr & BMSR_ESTATEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1730) 		err = mii_read(np, np->phy_addr, MII_ESTATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1731) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1732) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1733) 		estat = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1736) 	bmcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1737) 	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1738) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1739) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1741) 	if (lp->loopback_mode == LOOPBACK_MAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1742) 		bmcr |= BMCR_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1743) 		if (lp->active_speed == SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1744) 			bmcr |= BMCR_SPEED1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1745) 		if (lp->active_duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1746) 			bmcr |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1747) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1749) 	if (lp->loopback_mode == LOOPBACK_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1750) 		u16 aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1752) 		aux = (BCM5464R_AUX_CTL_EXT_LB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1753) 		       BCM5464R_AUX_CTL_WRITE_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1754) 		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1755) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1756) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1757) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1759) 	if (lp->autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1760) 		u16 ctrl1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1762) 		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1763) 		if ((bmsr & BMSR_10HALF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1764) 			(lp->advertising & ADVERTISED_10baseT_Half))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1765) 			adv |= ADVERTISE_10HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1766) 		if ((bmsr & BMSR_10FULL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1767) 			(lp->advertising & ADVERTISED_10baseT_Full))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1768) 			adv |= ADVERTISE_10FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1769) 		if ((bmsr & BMSR_100HALF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1770) 			(lp->advertising & ADVERTISED_100baseT_Half))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1771) 			adv |= ADVERTISE_100HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1772) 		if ((bmsr & BMSR_100FULL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1773) 			(lp->advertising & ADVERTISED_100baseT_Full))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1774) 			adv |= ADVERTISE_100FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1775) 		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1776) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1777) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1779) 		if (likely(bmsr & BMSR_ESTATEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1780) 			ctrl1000 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1781) 			if ((estat & ESTATUS_1000_THALF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1782) 				(lp->advertising & ADVERTISED_1000baseT_Half))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1783) 				ctrl1000 |= ADVERTISE_1000HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1784) 			if ((estat & ESTATUS_1000_TFULL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1785) 				(lp->advertising & ADVERTISED_1000baseT_Full))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1786) 				ctrl1000 |= ADVERTISE_1000FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1787) 			err = mii_write(np, np->phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1788) 					MII_CTRL1000, ctrl1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1789) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1790) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1791) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1793) 		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1794) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1795) 		/* !lp->autoneg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1796) 		int fulldpx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1798) 		if (lp->duplex == DUPLEX_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1799) 			bmcr |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1800) 			fulldpx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1801) 		} else if (lp->duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1802) 			fulldpx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1803) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1804) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1806) 		if (lp->speed == SPEED_1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1807) 			/* reject 1000-full when the PHY lacks it, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1808) 			   likewise 1000-half */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1809) 			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1810) 				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1811) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1812) 			bmcr |= BMCR_SPEED1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1813) 		} else if (lp->speed == SPEED_100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1814) 			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1815) 				(!fulldpx && !(bmsr & BMSR_100HALF)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1816) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1817) 			bmcr |= BMCR_SPEED100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1818) 		} else if (lp->speed == SPEED_10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1819) 			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1820) 				(!fulldpx && !(bmsr & BMSR_10HALF)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1821) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1822) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1823) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1824) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1826) 	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1827) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1828) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1830) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1831) 	err = mii_read(np, np->phy_addr, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1832) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1833) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1834) 	bmcr = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1836) 	err = mii_read(np, np->phy_addr, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1837) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1838) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1839) 	bmsr = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1841) 	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1842) 		np->port, bmcr, bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1843) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1845) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1848) static int xcvr_init_1g(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1850) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1852) 	/* XXX shared resource, lock parent XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1853) 	val = nr64(MIF_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1854) 	val &= ~MIF_CONFIG_INDIRECT_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1855) 	nw64(MIF_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1857) 	return mii_init_common(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1859) 
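/* Thin dispatchers into the per-media PHY ops table; a NULL hook just
 * means the step is not needed for this PHY/SERDES combination.
 */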
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1860) static int niu_xcvr_init(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1862) 	const struct niu_phy_ops *ops = np->phy_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1863) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1865) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1866) 	if (ops->xcvr_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1867) 		err = ops->xcvr_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1869) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1872) static int niu_serdes_init(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1874) 	const struct niu_phy_ops *ops = np->phy_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1875) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1877) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1878) 	if (ops->serdes_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1879) 		err = ops->serdes_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1881) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1884) static void niu_init_xif(struct niu *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1885) static void niu_handle_led(struct niu *, int status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1886) 
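/* Propagate a polled link-state change to the stack: log speed and
 * duplex on a down-to-up transition, reprogram the XIF and LED under
 * np->lock, and toggle the carrier.  Nothing is done when the polled
 * state already matches what netif_carrier reports.
 */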
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1887) static int niu_link_status_common(struct niu *np, int link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1889) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1890) 	struct net_device *dev = np->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1891) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1893) 	if (!netif_carrier_ok(dev) && link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1894) 		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1895) 			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1896) 			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1897) 			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1898) 			   "10Mbit/sec",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1899) 			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1901) 		spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1902) 		niu_init_xif(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1903) 		niu_handle_led(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1904) 		spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1906) 		netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1907) 	} else if (netif_carrier_ok(dev) && !link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1908) 		netif_warn(np, link, dev, "Link is down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1909) 		spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1910) 		niu_handle_led(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1911) 		spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1912) 		netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1913) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1915) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1917) 
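/* Marvell 88X2011 link check.  The link is reported up only when the
 * PMA/PMD status, the PCS status (read twice, presumably because the
 * status bits latch) and the XGXS lane-status register all agree.
 * Speed/duplex are fixed at 10G/full, and the activity LED is driven
 * to match the result.
 */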
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1918) static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1920) 	int err, link_up, pma_status, pcs_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1922) 	link_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1924) 	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1925) 			MRVL88X2011_10G_PMD_STATUS_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1926) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1927) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1929) 	/* Check PMA/PMD Register: 1.0001.2 == 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1930) 	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1931) 			MRVL88X2011_PMA_PMD_STATUS_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1932) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1933) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1935) 	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1937) 	/* Check PCS Register: 3.0001.2 == 1: read twice */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1938) 	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1939) 			MRVL88X2011_PMA_PMD_STATUS_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1940) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1941) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1943) 	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1944) 			MRVL88X2011_PMA_PMD_STATUS_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1945) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1946) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1948) 	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1950) 	/* Check XGXS Register: 4.0018.[0-3,12] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1951) 	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1952) 			MRVL88X2011_10G_XGXS_LANE_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1953) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1954) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1956) 	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1957) 		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1958) 		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1959) 		    0x800))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1960) 		link_up = (pma_status && pcs_status) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1962) 	np->link_config.active_speed = SPEED_10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1963) 	np->link_config.active_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1964) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1965) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1966) 	mrvl88x2011_act_led(np, (link_up ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1967) 				 MRVL88X2011_LED_CTL_PCS_ACT :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1968) 				 MRVL88X2011_LED_CTL_OFF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1970) 	*link_up_p = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1971) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1973) 
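/* BCM8706 variant used on hotplug configurations.  A read of 0xffff
 * from the PMD signal-detect register means the PHY did not answer
 * MDIO at all; that value is returned as-is so that
 * link_status_10g_hotplug() can treat it as "no PHY, back-to-back
 * XAUI" rather than as a hard error.
 */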
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1974) static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1976) 	int err, link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1977) 	link_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1979) 	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1980) 			BCM8704_PMD_RCV_SIGDET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1981) 	if (err < 0 || err == 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1982) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1983) 	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1984) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1985) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1988) 	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1989) 			BCM8704_PCS_10G_R_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1990) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1991) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1993) 	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1994) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1995) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1998) 	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1999) 			BCM8704_PHYXS_XGXS_LANE_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2000) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2001) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2002) 	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2003) 		    PHYXS_XGXS_LANE_STAT_MAGIC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2004) 		    PHYXS_XGXS_LANE_STAT_PATTEST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2005) 		    PHYXS_XGXS_LANE_STAT_LANE3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2006) 		    PHYXS_XGXS_LANE_STAT_LANE2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2007) 		    PHYXS_XGXS_LANE_STAT_LANE1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2008) 		    PHYXS_XGXS_LANE_STAT_LANE0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2009) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2010) 		np->link_config.active_speed = SPEED_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2011) 		np->link_config.active_duplex = DUPLEX_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2012) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2015) 	link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2016) 	np->link_config.active_speed = SPEED_10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2017) 	np->link_config.active_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2018) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2020) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2021) 	*link_up_p = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2022) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2024) 
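/* Same ladder as the 8706 version above (signal detect, PCS block
 * lock, XGXS lane status) but without the 0xffff hotplug case, and
 * the expected lane-status value does not include PATTEST here.
 */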
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2025) static int link_status_10g_bcom(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2027) 	int err, link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2029) 	link_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2031) 	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2032) 			BCM8704_PMD_RCV_SIGDET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2033) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2034) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2035) 	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2036) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2037) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2038) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2040) 	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2041) 			BCM8704_PCS_10G_R_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2042) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2043) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2044) 	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2045) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2046) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2047) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2049) 	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2050) 			BCM8704_PHYXS_XGXS_LANE_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2051) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2052) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2054) 	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2055) 		    PHYXS_XGXS_LANE_STAT_MAGIC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2056) 		    PHYXS_XGXS_LANE_STAT_LANE3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2057) 		    PHYXS_XGXS_LANE_STAT_LANE2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2058) 		    PHYXS_XGXS_LANE_STAT_LANE1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2059) 		    PHYXS_XGXS_LANE_STAT_LANE0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2060) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2061) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2064) 	link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2065) 	np->link_config.active_speed = SPEED_10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2066) 	np->link_config.active_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2067) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2069) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2070) 	*link_up_p = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2071) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2074) static int link_status_10g(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2076) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2077) 	int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2079) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2081) 	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2082) 		int phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2084) 		phy_id = phy_decode(np->parent->port_phy, np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2085) 		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2087) 		/* handle different phy types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2088) 		switch (phy_id & NIU_PHY_ID_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2089) 		case NIU_PHY_ID_MRVL88X2011:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2090) 			err = link_status_10g_mrvl(np, link_up_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2091) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2093) 		default: /* bcom 8704 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2094) 			err = link_status_10g_bcom(np, link_up_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2095) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2096) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2097) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2099) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2101) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2103) 
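/* A hotplugged PHY may not answer MDIO, so presence is inferred from
 * the SERDES side instead: all of the port's signal-detect/ready bits
 * in ESR_INT_SIGNALS must be up.  Ports other than 0 and 1 always
 * report "no PHY" here.
 */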
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2104) static int niu_10g_phy_present(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2106) 	u64 sig, mask, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2108) 	sig = nr64(ESR_INT_SIGNALS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2109) 	switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2110) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2111) 		mask = ESR_INT_SIGNALS_P0_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2112) 		val = (ESR_INT_SRDY0_P0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2113) 		       ESR_INT_DET0_P0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2114) 		       ESR_INT_XSRDY_P0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2115) 		       ESR_INT_XDP_P0_CH3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2116) 		       ESR_INT_XDP_P0_CH2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2117) 		       ESR_INT_XDP_P0_CH1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2118) 		       ESR_INT_XDP_P0_CH0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2119) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2121) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2122) 		mask = ESR_INT_SIGNALS_P1_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2123) 		val = (ESR_INT_SRDY0_P1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2124) 		       ESR_INT_DET0_P1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2125) 		       ESR_INT_XSRDY_P1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2126) 		       ESR_INT_XDP_P1_CH3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2127) 		       ESR_INT_XDP_P1_CH2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2128) 		       ESR_INT_XDP_P1_CH1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2129) 		       ESR_INT_XDP_P1_CH0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2130) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2132) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2133) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2134) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2136) 	if ((sig & mask) != val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2137) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2138) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2140) 
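/* Hotplug link poll.  On a not-present to present transition the
 * transceiver is re-initialized; if that fails but the PHY still
 * answers MDIO, the present flag is cleared again as a debounce.  A
 * 0xffff status from link_status_10g_bcm8706() is taken to mean
 * back-to-back XAUI with no MDIO device (C10NEM) and is reported as a
 * good 10G/full link.  Note that this function always returns 0,
 * presumably so that one bad poll does not stop the niu_timer() cycle.
 */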
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2141) static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2143) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2144) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2145) 	int phy_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2146) 	int phy_present_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2148) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2150) 	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2151) 		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2152) 			1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2153) 		phy_present = niu_10g_phy_present(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2154) 		if (phy_present != phy_present_prev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2155) 			/* state change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2156) 			if (phy_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2157) 				/* A NEM was just plugged in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2158) 				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2159) 				if (np->phy_ops->xcvr_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2160) 					err = np->phy_ops->xcvr_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2161) 				if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2162) 					err = mdio_read(np, np->phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2163) 						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2164) 					if (err == 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2165) 						/* No mdio, back-to-back XAUI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2166) 						goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2167) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2168) 					/* debounce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2169) 					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2170) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2171) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2172) 				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2173) 				*link_up_p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2174) 				netif_warn(np, link, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2175) 					   "Hotplug PHY Removed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2176) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2177) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2178) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2179) 		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2180) 			err = link_status_10g_bcm8706(np, link_up_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2181) 			if (err == 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2182) 				/* No mdio, back-to-back XAUI: it is C10NEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2183) 				*link_up_p = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2184) 				np->link_config.active_speed = SPEED_10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2185) 				np->link_config.active_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2186) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2187) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2188) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2190) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2192) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2195) static int niu_link_status(struct niu *np, int *link_up_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2197) 	const struct niu_phy_ops *ops = np->phy_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2198) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2200) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2201) 	if (ops->link_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2202) 		err = ops->link_status(np, link_up_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2204) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2206) 
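/* Periodic link poll: recheck after 5 seconds while the carrier is
 * up, after 1 second while it is down, rescheduling ourselves each
 * time.
 */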
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2207) static void niu_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2209) 	struct niu *np = from_timer(np, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2210) 	unsigned long off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2211) 	int err, link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2213) 	err = niu_link_status(np, &link_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2214) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2215) 		niu_link_status_common(np, link_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2217) 	if (netif_carrier_ok(np->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2218) 		off = 5 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2219) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2220) 		off = 1 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2221) 	np->timer.expires = jiffies + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2223) 	add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2225) 
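/* Per-media PHY operations and templates.  Later probing code picks
 * one niu_phy_template per board/PHY combination; a template pairs an
 * ops vtable with the base MDIO address for that transceiver type.
 */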
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2226) static const struct niu_phy_ops phy_ops_10g_serdes = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2227) 	.serdes_init		= serdes_init_10g_serdes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2228) 	.link_status		= link_status_10g_serdes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2229) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2231) static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2232) 	.serdes_init		= serdes_init_niu_10g_serdes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2233) 	.link_status		= link_status_10g_serdes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2234) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2236) static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2237) 	.serdes_init		= serdes_init_niu_1g_serdes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2238) 	.link_status		= link_status_1g_serdes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2239) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2241) static const struct niu_phy_ops phy_ops_1g_rgmii = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2242) 	.xcvr_init		= xcvr_init_1g_rgmii,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2243) 	.link_status		= link_status_1g_rgmii,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2244) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2246) static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2247) 	.serdes_init		= serdes_init_niu_10g_fiber,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2248) 	.xcvr_init		= xcvr_init_10g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2249) 	.link_status		= link_status_10g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2250) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2252) static const struct niu_phy_ops phy_ops_10g_fiber = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2253) 	.serdes_init		= serdes_init_10g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2254) 	.xcvr_init		= xcvr_init_10g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2255) 	.link_status		= link_status_10g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2256) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2258) static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2259) 	.serdes_init		= serdes_init_10g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2260) 	.xcvr_init		= xcvr_init_10g_bcm8706,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2261) 	.link_status		= link_status_10g_hotplug,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2262) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2264) static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2265) 	.serdes_init		= serdes_init_niu_10g_fiber,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2266) 	.xcvr_init		= xcvr_init_10g_bcm8706,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2267) 	.link_status		= link_status_10g_hotplug,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2268) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2270) static const struct niu_phy_ops phy_ops_10g_copper = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2271) 	.serdes_init		= serdes_init_10g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2272) 	.link_status		= link_status_10g, /* XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2273) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2275) static const struct niu_phy_ops phy_ops_1g_fiber = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2276) 	.serdes_init		= serdes_init_1g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2277) 	.xcvr_init		= xcvr_init_1g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2278) 	.link_status		= link_status_1g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2279) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2281) static const struct niu_phy_ops phy_ops_1g_copper = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2282) 	.xcvr_init		= xcvr_init_1g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2283) 	.link_status		= link_status_1g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2284) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2286) struct niu_phy_template {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2287) 	const struct niu_phy_ops	*ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2288) 	u32				phy_addr_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2289) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2291) static const struct niu_phy_template phy_template_niu_10g_fiber = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2292) 	.ops		= &phy_ops_10g_fiber_niu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2293) 	.phy_addr_base	= 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2294) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2296) static const struct niu_phy_template phy_template_niu_10g_serdes = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2297) 	.ops		= &phy_ops_10g_serdes_niu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2298) 	.phy_addr_base	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2299) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2301) static const struct niu_phy_template phy_template_niu_1g_serdes = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2302) 	.ops		= &phy_ops_1g_serdes_niu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2303) 	.phy_addr_base	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2304) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2306) static const struct niu_phy_template phy_template_10g_fiber = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2307) 	.ops		= &phy_ops_10g_fiber,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2308) 	.phy_addr_base	= 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2309) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2311) static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2312) 	.ops		= &phy_ops_10g_fiber_hotplug,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2313) 	.phy_addr_base	= 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2314) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2316) static const struct niu_phy_template phy_template_niu_10g_hotplug = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2317) 	.ops		= &phy_ops_niu_10g_hotplug,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2318) 	.phy_addr_base	= 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2319) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2321) static const struct niu_phy_template phy_template_10g_copper = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2322) 	.ops		= &phy_ops_10g_copper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2323) 	.phy_addr_base	= 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2324) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2326) static const struct niu_phy_template phy_template_1g_fiber = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2327) 	.ops		= &phy_ops_1g_fiber,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2328) 	.phy_addr_base	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2329) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2331) static const struct niu_phy_template phy_template_1g_copper = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2332) 	.ops		= &phy_ops_1g_copper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2333) 	.phy_addr_base	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2334) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2336) static const struct niu_phy_template phy_template_1g_rgmii = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2337) 	.ops		= &phy_ops_1g_rgmii,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2338) 	.phy_addr_base	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2339) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2341) static const struct niu_phy_template phy_template_10g_serdes = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2342) 	.ops		= &phy_ops_10g_serdes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2343) 	.phy_addr_base	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2344) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2345) 
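/* ATCA port-number map, indexed by np->port; apparently only ports 2
 * and 3 (mapped to 11 and 10) are meaningful, with the first two
 * entries unused.
 */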
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2346) static int niu_atca_port_num[4] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2347) 	0, 0, 11, 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2348) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2349) 
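/* Shared 10G SERDES bring-up for ports 0 and 1: program the per-port
 * PLL, control and test registers (pad loopback when LOOPBACK_PHY is
 * requested), set rx/tx control and glue0 on all four lanes, and
 * finally verify that the port's ESR_INT_SIGNALS bits look as
 * expected.
 */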
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2350) static int serdes_init_10g_serdes(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2352) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2353) 	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2354) 	u64 ctrl_val, test_cfg_val, sig, mask, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2356) 	switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2357) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2358) 		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2359) 		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2360) 		pll_cfg = ENET_SERDES_0_PLL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2361) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2362) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2363) 		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2364) 		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2365) 		pll_cfg = ENET_SERDES_1_PLL_CFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2366) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2368) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2369) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2371) 	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2372) 		    ENET_SERDES_CTRL_SDET_1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2373) 		    ENET_SERDES_CTRL_SDET_2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2374) 		    ENET_SERDES_CTRL_SDET_3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2375) 		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2376) 		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2377) 		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2378) 		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2379) 		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2380) 		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2381) 		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2382) 		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2383) 	test_cfg_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2385) 	if (lp->loopback_mode == LOOPBACK_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2386) 		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2387) 				  ENET_SERDES_TEST_MD_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2388) 				 (ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2389) 				  ENET_SERDES_TEST_MD_1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2390) 				 (ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2391) 				  ENET_SERDES_TEST_MD_2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2392) 				 (ENET_TEST_MD_PAD_LOOPBACK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2393) 				  ENET_SERDES_TEST_MD_3_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2396) 	esr_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2397) 	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2398) 	nw64(ctrl_reg, ctrl_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2399) 	nw64(test_cfg_reg, test_cfg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2401) 	/* Initialize all 4 lanes of the SERDES.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2402) 	for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2403) 		u32 rxtx_ctrl, glue0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2404) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2406) 		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2407) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2408) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2409) 		err = esr_read_glue0(np, i, &glue0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2410) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2411) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2413) 		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2414) 		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2415) 			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2417) 		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2418) 			   ESR_GLUE_CTRL0_THCNT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2419) 			   ESR_GLUE_CTRL0_BLTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2420) 		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2421) 			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2422) 			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2423) 			  (BLTIME_300_CYCLES <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2424) 			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2426) 		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2427) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2428) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2429) 		err = esr_write_glue0(np, i, glue0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2430) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2431) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2435) 	sig = nr64(ESR_INT_SIGNALS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2436) 	switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2437) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2438) 		mask = ESR_INT_SIGNALS_P0_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2439) 		val = (ESR_INT_SRDY0_P0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2440) 		       ESR_INT_DET0_P0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2441) 		       ESR_INT_XSRDY_P0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2442) 		       ESR_INT_XDP_P0_CH3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2443) 		       ESR_INT_XDP_P0_CH2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2444) 		       ESR_INT_XDP_P0_CH1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2445) 		       ESR_INT_XDP_P0_CH0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2446) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2448) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2449) 		mask = ESR_INT_SIGNALS_P1_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2450) 		val = (ESR_INT_SRDY0_P1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2451) 		       ESR_INT_DET0_P1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2452) 		       ESR_INT_XSRDY_P1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2453) 		       ESR_INT_XDP_P1_CH3 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2454) 		       ESR_INT_XDP_P1_CH2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2455) 		       ESR_INT_XDP_P1_CH1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2456) 		       ESR_INT_XDP_P1_CH0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2457) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2459) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2460) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2461) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2463) 	if ((sig & mask) != val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2464) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2465) 		err = serdes_init_1g_serdes(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2466) 		if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2467) 			np->flags &= ~NIU_FLAGS_10G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2468) 			np->mac_xcvr = MAC_XCVR_PCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2469) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2470) 			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2471) 				   np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2472) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2473) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2476) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2477) }
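
/* Editorial sketch (not part of the driver): the bring-up above ends by
 * sampling ESR_INT_SIGNALS and requiring every ready/detect bit for the
 * port to be present before the 10G path is accepted; anything less
 * falls back to the 1G SERDES path.  The comparison pattern in
 * isolation, kept under #if 0 like the other reference-only blocks in
 * this file:
 */
#if 0
static bool example_serdes_signals_ok(u64 sig, u64 mask, u64 expected)
{
	/* all required bits must be set; bits outside the mask are
	 * ignored */
	return (sig & mask) == expected;
}
#endif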
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2479) static int niu_determine_phy_disposition(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2481) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2482) 	u8 plat_type = parent->plat_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2483) 	const struct niu_phy_template *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2484) 	u32 phy_addr_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2486) 	if (plat_type == PLAT_TYPE_NIU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2487) 		switch (np->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2488) 			(NIU_FLAGS_10G |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2489) 			 NIU_FLAGS_FIBER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2490) 			 NIU_FLAGS_XCVR_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2491) 		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2492) 			/* 10G Serdes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2493) 			tp = &phy_template_niu_10g_serdes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2494) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2495) 		case NIU_FLAGS_XCVR_SERDES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2496) 			/* 1G Serdes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2497) 			tp = &phy_template_niu_1g_serdes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2498) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2499) 		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2500) 			/* 10G Fiber */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2501) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2502) 			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2503) 				tp = &phy_template_niu_10g_hotplug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2504) 				if (np->port == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2505) 					phy_addr_off = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2506) 				if (np->port == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2507) 					phy_addr_off = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2508) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2509) 				tp = &phy_template_niu_10g_fiber;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2510) 				phy_addr_off += np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2511) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2512) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2513) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2514) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2515) 		switch (np->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2516) 			(NIU_FLAGS_10G |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2517) 			 NIU_FLAGS_FIBER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2518) 			 NIU_FLAGS_XCVR_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2519) 		case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2520) 			/* 1G copper */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2521) 			tp = &phy_template_1g_copper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2522) 			if (plat_type == PLAT_TYPE_VF_P0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2523) 				phy_addr_off = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2524) 			else if (plat_type == PLAT_TYPE_VF_P1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2525) 				phy_addr_off = 26;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2527) 			phy_addr_off += (np->port ^ 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2528) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2530) 		case NIU_FLAGS_10G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2531) 			/* 10G copper */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2532) 			tp = &phy_template_10g_copper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2533) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2535) 		case NIU_FLAGS_FIBER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2536) 			/* 1G fiber */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2537) 			tp = &phy_template_1g_fiber;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2538) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2540) 		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2541) 			/* 10G fiber */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2542) 			tp = &phy_template_10g_fiber;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2543) 			if (plat_type == PLAT_TYPE_VF_P0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2544) 			    plat_type == PLAT_TYPE_VF_P1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2545) 				phy_addr_off = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2546) 			phy_addr_off += np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2547) 			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2548) 				tp = &phy_template_10g_fiber_hotplug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2549) 				if (np->port == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2550) 					phy_addr_off = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2551) 				if (np->port == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2552) 					phy_addr_off = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2553) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2554) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2556) 		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2557) 		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2558) 		case NIU_FLAGS_XCVR_SERDES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2559) 			switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2560) 			case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2561) 			case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2562) 				tp = &phy_template_10g_serdes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2563) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2564) 			case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2565) 			case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2566) 				tp = &phy_template_1g_rgmii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2567) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2568) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2569) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2570) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2571) 			phy_addr_off = niu_atca_port_num[np->port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2572) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2574) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2575) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2576) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2579) 	np->phy_ops = tp->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2580) 	np->phy_addr = tp->phy_addr_base + phy_addr_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2582) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2583) }
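
/* Editorial summary of the selection above: the NIU_FLAGS_10G,
 * NIU_FLAGS_FIBER and NIU_FLAGS_XCVR_SERDES bits jointly pick a
 * niu_phy_template, and the final PHY address is the template's
 * phy_addr_base plus a per-port/per-platform offset (phy_addr_off).
 * Hot-pluggable PHYs override the offset with fixed slots: 8 for
 * port 0, 12 for port 1.
 */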
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2585) static int niu_init_link(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2587) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2588) 	int err, ignore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2590) 	if (parent->plat_type == PLAT_TYPE_NIU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2591) 		err = niu_xcvr_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2592) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2593) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2594) 		msleep(200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2596) 	err = niu_serdes_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2597) 	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2598) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2599) 	msleep(200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2600) 	err = niu_xcvr_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2601) 	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2602) 		niu_link_status(np, &ignore);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2603) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2606) static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2608) 	u16 reg0 = addr[4] << 8 | addr[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2609) 	u16 reg1 = addr[2] << 8 | addr[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2610) 	u16 reg2 = addr[0] << 8 | addr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2612) 	if (np->flags & NIU_FLAGS_XMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2613) 		nw64_mac(XMAC_ADDR0, reg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2614) 		nw64_mac(XMAC_ADDR1, reg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2615) 		nw64_mac(XMAC_ADDR2, reg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2616) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2617) 		nw64_mac(BMAC_ADDR0, reg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2618) 		nw64_mac(BMAC_ADDR1, reg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2619) 		nw64_mac(BMAC_ADDR2, reg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2621) }
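
/* Worked example (editorial): for the address 00:11:22:33:44:55 the
 * three writes above are reg2 = 0x0011, reg1 = 0x2233 and reg0 =
 * 0x4455 — the hardware takes the MAC in 16-bit chunks, lowest-order
 * chunk in register 0.  A minimal sketch of the same packing:
 */
#if 0
static void example_pack_mac(const unsigned char *addr, u16 reg[3])
{
	reg[0] = addr[4] << 8 | addr[5];	/* last two bytes */
	reg[1] = addr[2] << 8 | addr[3];
	reg[2] = addr[0] << 8 | addr[1];	/* first two bytes */
}
#endif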
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2623) static int niu_num_alt_addr(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2625) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2626) 		return XMAC_NUM_ALT_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2627) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2628) 		return BMAC_NUM_ALT_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2631) static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2633) 	u16 reg0 = addr[4] << 8 | addr[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2634) 	u16 reg1 = addr[2] << 8 | addr[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2635) 	u16 reg2 = addr[0] << 8 | addr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2637) 	if (index >= niu_num_alt_addr(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2638) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2640) 	if (np->flags & NIU_FLAGS_XMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2641) 		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2642) 		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2643) 		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2644) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2645) 		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2646) 		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2647) 		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2650) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2653) static int niu_enable_alt_mac(struct niu *np, int index, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2655) 	unsigned long reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2656) 	u64 val, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2658) 	if (index >= niu_num_alt_addr(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2659) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2661) 	if (np->flags & NIU_FLAGS_XMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2662) 		reg = XMAC_ADDR_CMPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2663) 		mask = 1 << index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2664) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2665) 		reg = BMAC_ADDR_CMPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2666) 		mask = 1 << (index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2669) 	val = nr64_mac(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2670) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2671) 		val |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2672) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2673) 		val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2674) 	nw64_mac(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2676) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2679) static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2680) 				   int num, int mac_pref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2682) 	u64 val = nr64_mac(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2683) 	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2684) 	val |= num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2685) 	if (mac_pref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2686) 		val |= HOST_INFO_MPR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2687) 	nw64_mac(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2690) static int __set_rdc_table_num(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2691) 			       int xmac_index, int bmac_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2692) 			       int rdc_table_num, int mac_pref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2694) 	unsigned long reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2696) 	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2697) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2698) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2699) 		reg = XMAC_HOST_INFO(xmac_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2700) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2701) 		reg = BMAC_HOST_INFO(bmac_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2702) 	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2703) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2706) static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2707) 					 int mac_pref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2709) 	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2712) static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2713) 					   int mac_pref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2715) 	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2718) static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2719) 				     int table_num, int mac_pref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2721) 	if (idx >= niu_num_alt_addr(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2722) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2723) 	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2724) }
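
/* Editorial note on the HOST_INFO slot layout implied by the three
 * wrappers above: XMAC — alternate MAC i in slot i, multicast in slot
 * 16, primary in slot 17; BMAC — primary in slot 0, alternate MAC i in
 * slot i + 1, multicast in slot 8.  The alternate-address counts come
 * from niu_num_alt_addr() above.
 */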
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2726) static u64 vlan_entry_set_parity(u64 reg_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2728) 	u64 port01_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2729) 	u64 port23_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2731) 	port01_mask = 0x00ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2732) 	port23_mask = 0xff00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2734) 	if (hweight64(reg_val & port01_mask) & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2735) 		reg_val |= ENET_VLAN_TBL_PARITY0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2736) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2737) 		reg_val &= ~ENET_VLAN_TBL_PARITY0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2739) 	if (hweight64(reg_val & port23_mask) & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2740) 		reg_val |= ENET_VLAN_TBL_PARITY1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2741) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2742) 		reg_val &= ~ENET_VLAN_TBL_PARITY1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2744) 	return reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2745) }
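
/* Worked example (editorial): the low byte carries the per-port fields
 * for ports 0/1 and the high byte those for ports 2/3; each parity bit
 * is set when its byte has an odd population count, so e.g. 0x0007 in
 * the low byte (three bits set) sets ENET_VLAN_TBL_PARITY0.  The test
 * in isolation:
 */
#if 0
static bool example_byte_has_odd_parity(u64 val, u64 byte_mask)
{
	return hweight64(val & byte_mask) & 1;
}
#endif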
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2747) static void vlan_tbl_write(struct niu *np, unsigned long index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2748) 			   int port, int vpr, int rdc_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2750) 	u64 reg_val = nr64(ENET_VLAN_TBL(index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2752) 	reg_val &= ~((ENET_VLAN_TBL_VPR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2753) 		      ENET_VLAN_TBL_VLANRDCTBLN) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2754) 		     ENET_VLAN_TBL_SHIFT(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2755) 	if (vpr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2756) 		reg_val |= (ENET_VLAN_TBL_VPR <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2757) 			    ENET_VLAN_TBL_SHIFT(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2758) 	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2760) 	reg_val = vlan_entry_set_parity(reg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2762) 	nw64(ENET_VLAN_TBL(index), reg_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2763) }
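
/* Usage sketch (editorial; assumes 'index' is the VLAN ID being
 * programmed): steer VLAN 100 traffic seen on port 2 to RDC table 4
 * with the VLAN-preference bit set.
 */
#if 0
static void example_program_vlan(struct niu *np)
{
	vlan_tbl_write(np, 100 /* vlan */, 2 /* port */,
		       1 /* vpr */, 4 /* rdc_table */);
}
#endif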
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2765) static void vlan_tbl_clear(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2767) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2769) 	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2770) 		nw64(ENET_VLAN_TBL(i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2773) static int tcam_wait_bit(struct niu *np, u64 bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2775) 	int limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2777) 	while (--limit > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2778) 		if (nr64(TCAM_CTL) & bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2779) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2780) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2782) 	if (limit <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2783) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2785) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2786) }
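
/* Editorial note: with limit = 1000 and udelay(1) per iteration, the
 * poll above gives the hardware roughly one millisecond to raise the
 * requested TCAM_CTL bit before -ENODEV is returned.
 */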
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2788) static int tcam_flush(struct niu *np, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2790) 	nw64(TCAM_KEY_0, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2791) 	nw64(TCAM_KEY_MASK_0, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2792) 	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2794) 	return tcam_wait_bit(np, TCAM_CTL_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2797) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2798) static int tcam_read(struct niu *np, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2799) 		     u64 *key, u64 *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2801) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2803) 	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2804) 	err = tcam_wait_bit(np, TCAM_CTL_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2805) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2806) 		key[0] = nr64(TCAM_KEY_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2807) 		key[1] = nr64(TCAM_KEY_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2808) 		key[2] = nr64(TCAM_KEY_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2809) 		key[3] = nr64(TCAM_KEY_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2810) 		mask[0] = nr64(TCAM_KEY_MASK_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2811) 		mask[1] = nr64(TCAM_KEY_MASK_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2812) 		mask[2] = nr64(TCAM_KEY_MASK_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2813) 		mask[3] = nr64(TCAM_KEY_MASK_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2815) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2817) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2819) static int tcam_write(struct niu *np, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2820) 		      u64 *key, u64 *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2822) 	nw64(TCAM_KEY_0, key[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2823) 	nw64(TCAM_KEY_1, key[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2824) 	nw64(TCAM_KEY_2, key[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2825) 	nw64(TCAM_KEY_3, key[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2826) 	nw64(TCAM_KEY_MASK_0, mask[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2827) 	nw64(TCAM_KEY_MASK_1, mask[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2828) 	nw64(TCAM_KEY_MASK_2, mask[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2829) 	nw64(TCAM_KEY_MASK_3, mask[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2830) 	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2832) 	return tcam_wait_bit(np, TCAM_CTL_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2835) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2836) static int tcam_assoc_read(struct niu *np, int index, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2838) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2840) 	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2841) 	err = tcam_wait_bit(np, TCAM_CTL_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2842) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2843) 		*data = nr64(TCAM_KEY_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2845) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2847) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2849) static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2851) 	nw64(TCAM_KEY_1, assoc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2852) 	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2854) 	return tcam_wait_bit(np, TCAM_CTL_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2855) }
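
/* Putting the two writers together (editorial sketch, not a driver
 * entry point): installing a complete classifier rule is key/mask
 * first, then the associated-data RAM word for the same index.
 */
#if 0
static int example_install_tcam_rule(struct niu *np, int index,
				     u64 *key, u64 *mask, u64 assoc)
{
	int err = tcam_write(np, index, key, mask);

	if (err)
		return err;
	return tcam_assoc_write(np, index, assoc);
}
#endif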
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2857) static void tcam_enable(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2859) 	u64 val = nr64(FFLP_CFG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2861) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2862) 		val &= ~FFLP_CFG_1_TCAM_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2863) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2864) 		val |= FFLP_CFG_1_TCAM_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2865) 	nw64(FFLP_CFG_1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2868) static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2870) 	u64 val = nr64(FFLP_CFG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2872) 	val &= ~(FFLP_CFG_1_FFLPINITDONE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2873) 		 FFLP_CFG_1_CAMLAT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2874) 		 FFLP_CFG_1_CAMRATIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2875) 	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2876) 	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2877) 	nw64(FFLP_CFG_1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2879) 	val = nr64(FFLP_CFG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2880) 	val |= FFLP_CFG_1_FFLPINITDONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2881) 	nw64(FFLP_CFG_1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2882) }
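
/* Editorial note: FFLP_CFG_1_FFLPINITDONE is cleared while the
 * latency/ratio fields are rewritten and only re-asserted by the
 * second write, presumably so the block re-latches the new values
 * before being marked initialized again.
 */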
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2884) static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2885) 				      int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2887) 	unsigned long reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2888) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2890) 	if (class < CLASS_CODE_ETHERTYPE1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2891) 	    class > CLASS_CODE_ETHERTYPE2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2892) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2894) 	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2895) 	val = nr64(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2896) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2897) 		val |= L2_CLS_VLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2898) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2899) 		val &= ~L2_CLS_VLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2900) 	nw64(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2902) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2905) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2906) static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2907) 				   u64 ether_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2909) 	unsigned long reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2910) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2912) 	if (class < CLASS_CODE_ETHERTYPE1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2913) 	    class > CLASS_CODE_ETHERTYPE2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2914) 	    (ether_type & ~(u64)0xffff) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2915) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2917) 	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2918) 	val = nr64(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2919) 	val &= ~L2_CLS_ETYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2920) 	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2921) 	nw64(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2923) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2925) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2927) static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2928) 				     int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2930) 	unsigned long reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2931) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2933) 	if (class < CLASS_CODE_USER_PROG1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2934) 	    class > CLASS_CODE_USER_PROG4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2935) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2937) 	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2938) 	val = nr64(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2939) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2940) 		val |= L3_CLS_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2941) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2942) 		val &= ~L3_CLS_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2943) 	nw64(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2945) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2948) static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2949) 				  int ipv6, u64 protocol_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2950) 				  u64 tos_mask, u64 tos_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2952) 	unsigned long reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2953) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2955) 	if (class < CLASS_CODE_USER_PROG1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2956) 	    class > CLASS_CODE_USER_PROG4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2957) 	    (protocol_id & ~(u64)0xff) != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2958) 	    (tos_mask & ~(u64)0xff) != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2959) 	    (tos_val & ~(u64)0xff) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2960) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2962) 	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2963) 	val = nr64(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2964) 	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2965) 		 L3_CLS_TOSMASK | L3_CLS_TOS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2966) 	if (ipv6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2967) 		val |= L3_CLS_IPVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2968) 	val |= (protocol_id << L3_CLS_PID_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2969) 	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2970) 	val |= (tos_val << L3_CLS_TOS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2971) 	nw64(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2973) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2974) }
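
/* Usage sketch (editorial; the protocol number is illustrative):
 * configure the first user-programmable class to match IPv4 TCP with
 * the TOS byte ignored, then mark the class valid.
 */
#if 0
static int example_classify_ipv4_tcp(struct niu *np)
{
	int err;

	/* ipv6 = 0, protocol 6 (TCP), tos_mask 0 -> TOS is don't-care */
	err = tcam_user_ip_class_set(np, CLASS_CODE_USER_PROG1,
				     0, 6, 0, 0);
	if (err)
		return err;
	return tcam_user_ip_class_enable(np, CLASS_CODE_USER_PROG1, 1);
}
#endif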
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2976) static int tcam_early_init(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2978) 	unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2979) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2981) 	tcam_enable(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2982) 	tcam_set_lat_and_ratio(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2983) 			       DEFAULT_TCAM_LATENCY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2984) 			       DEFAULT_TCAM_ACCESS_RATIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2985) 	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2986) 		err = tcam_user_eth_class_enable(np, i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2987) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2988) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2989) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2990) 	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2991) 		err = tcam_user_ip_class_enable(np, i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2992) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2993) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2996) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2999) static int tcam_flush_all(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3001) 	unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3003) 	for (i = 0; i < np->parent->tcam_num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3004) 		int err = tcam_flush(np, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3005) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3006) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3008) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3010) 
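/* Editorial note: HASH_TBL_ADDR_AUTOINC makes consecutive accesses to
 * HASH_TBL_DATA advance through FCRAM automatically, which is what the
 * multi-entry loops in hash_read()/hash_write() below rely on; a
 * single-entry access works either way.
 */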
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3011) static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3013) 	return (u64)index | (num_entries > 1 ? HASH_TBL_ADDR_AUTOINC : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3016) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3017) static int hash_read(struct niu *np, unsigned long partition,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3018) 		     unsigned long index, unsigned long num_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3019) 		     u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3021) 	u64 val = hash_addr_regval(index, num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3022) 	unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3024) 	if (partition >= FCRAM_NUM_PARTITIONS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3025) 	    index + num_entries > FCRAM_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3026) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3028) 	nw64(HASH_TBL_ADDR(partition), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3029) 	for (i = 0; i < num_entries; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3030) 		data[i] = nr64(HASH_TBL_DATA(partition));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3032) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3034) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3036) static int hash_write(struct niu *np, unsigned long partition,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3037) 		      unsigned long index, unsigned long num_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3038) 		      u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3040) 	u64 val = hash_addr_regval(index, num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3041) 	unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3043) 	if (partition >= FCRAM_NUM_PARTITIONS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3044) 	    index + (num_entries * 8) > FCRAM_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3045) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3047) 	nw64(HASH_TBL_ADDR(partition), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3048) 	for (i = 0; i < num_entries; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3049) 		nw64(HASH_TBL_DATA(partition), data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3051) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3052) }
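
/* Illustrative burst write (editorial): with auto-increment selected by
 * hash_addr_regval() for multi-entry accesses, the loop in hash_write()
 * lands each of these four words in consecutive FCRAM locations.
 */
#if 0
static int example_hash_burst(struct niu *np, u64 words[4])
{
	return hash_write(np, 0 /* partition */, 0 /* index */, 4, words);
}
#endif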
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3054) static void fflp_reset(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3056) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3058) 	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3059) 	udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3060) 	nw64(FFLP_CFG_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3062) 	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3063) 	nw64(FFLP_CFG_1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3066) static void fflp_set_timings(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3068) 	u64 val = nr64(FFLP_CFG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3070) 	val &= ~FFLP_CFG_1_FFLPINITDONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3071) 	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3072) 	nw64(FFLP_CFG_1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3074) 	val = nr64(FFLP_CFG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3075) 	val |= FFLP_CFG_1_FFLPINITDONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3076) 	nw64(FFLP_CFG_1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3078) 	val = nr64(FCRAM_REF_TMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3079) 	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3080) 	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3081) 	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3082) 	nw64(FCRAM_REF_TMR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3085) static int fflp_set_partition(struct niu *np, u64 partition,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3086) 			      u64 mask, u64 base, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3088) 	unsigned long reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3089) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3091) 	if (partition >= FCRAM_NUM_PARTITIONS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3092) 	    (mask & ~(u64)0x1f) != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3093) 	    (base & ~(u64)0x1f) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3094) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3096) 	reg = FLW_PRT_SEL(partition);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3098) 	val = nr64(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3099) 	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3100) 	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3101) 	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3102) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3103) 		val |= FLW_PRT_SEL_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3104) 	nw64(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3106) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3109) static int fflp_disable_all_partitions(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3111) 	unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3113) 	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3114) 		int err = fflp_set_partition(np, i, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3115) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3116) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3118) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3121) static void fflp_llcsnap_enable(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3123) 	u64 val = nr64(FFLP_CFG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3125) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3126) 		val |= FFLP_CFG_1_LLCSNAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3127) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3128) 		val &= ~FFLP_CFG_1_LLCSNAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3129) 	nw64(FFLP_CFG_1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3132) static void fflp_errors_enable(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3134) 	u64 val = nr64(FFLP_CFG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3136) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3137) 		val &= ~FFLP_CFG_1_ERRORDIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3138) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3139) 		val |= FFLP_CFG_1_ERRORDIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3140) 	nw64(FFLP_CFG_1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3143) static int fflp_hash_clear(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3145) 	struct fcram_hash_ipv4 ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3146) 	unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3148) 	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3149) 	memset(&ent, 0, sizeof(ent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3150) 	ent.header = HASH_HEADER_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3152) 	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3153) 		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3154) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3155) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3156) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3157) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3160) static int fflp_early_init(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3162) 	struct niu_parent *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3163) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3164) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3166) 	niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3168) 	parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3169) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3170) 	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3171) 		if (np->parent->plat_type != PLAT_TYPE_NIU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3172) 			fflp_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3173) 			fflp_set_timings(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3174) 			err = fflp_disable_all_partitions(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3175) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3176) 				netif_printk(np, probe, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3177) 					     "fflp_disable_all_partitions failed, err=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3178) 					     err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3179) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3180) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3181) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3183) 		err = tcam_early_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3184) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3185) 			netif_printk(np, probe, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3186) 				     "tcam_early_init failed, err=%d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3187) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3188) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3189) 		fflp_llcsnap_enable(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3190) 		fflp_errors_enable(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3191) 		nw64(H1POLY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3192) 		nw64(H2POLY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3194) 		err = tcam_flush_all(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3195) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3196) 			netif_printk(np, probe, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3197) 				     "tcam_flush_all failed, err=%d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3198) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3199) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3200) 		if (np->parent->plat_type != PLAT_TYPE_NIU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3201) 			err = fflp_hash_clear(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3202) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3203) 				netif_printk(np, probe, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3204) 					     "fflp_hash_clear failed, err=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3205) 					     err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3206) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3207) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3208) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3210) 		vlan_tbl_clear(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3212) 		parent->flags |= PARENT_FLGS_CLS_HWINIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3214) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3215) 	niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3216) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3219) static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3221) 	if (class_code < CLASS_CODE_USER_PROG1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3222) 	    class_code > CLASS_CODE_SCTP_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3223) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3225) 	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3226) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3229) static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3231) 	if (class_code < CLASS_CODE_USER_PROG1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3232) 	    class_code > CLASS_CODE_SCTP_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3233) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3235) 	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3236) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3239) /* Entries for the ports are interleaved in the TCAM */
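/* A sketch of the mapping: assuming tcam_top == 0 and num_ports == 2,
 * logical index 0 maps to hardware entry 2 and index 1 to entry 4,
 * striding over the slots that belong to the other ports.  Note that
 * an out-of-range index is clamped to 0 rather than rejected.
 */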
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3240) static u16 tcam_get_index(struct niu *np, u16 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3242) 	/* One entry reserved for IP fragment rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3243) 	if (idx >= (np->clas.tcam_sz - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3244) 		idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3245) 	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3248) static u16 tcam_get_size(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3250) 	/* One entry reserved for IP fragment rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3251) 	return np->clas.tcam_sz - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3254) static u16 tcam_get_valid_entry_cnt(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3256) 	/* One entry reserved for IP fragment rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3257) 	return np->clas.tcam_valid_entries - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3259) 
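/* Attach @size bytes of @page at @offset as the next page fragment of
 * @skb, accounting for it in the skb's len, data_len and truesize.
 */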
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3260) static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3261) 			      u32 offset, u32 size, u32 truesize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3263) 	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3265) 	skb->len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3266) 	skb->data_len += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3267) 	skb->truesize += truesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3269) 
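/* RX pages are tracked in a small chained hash table keyed by the
 * page's DMA address.  While the driver owns a page, page->index
 * holds its base DMA address and page->mapping doubles as the chain
 * pointer; both fields are otherwise unused for these pages.
 */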
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3270) static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3272) 	a >>= PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3273) 	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3275) 	return a & (MAX_RBR_RING_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3278) static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3279) 				    struct page ***link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3281) 	unsigned int h = niu_hash_rxaddr(rp, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3282) 	struct page *p, **pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3284) 	addr &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3285) 	pp = &rp->rxhash[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3286) 	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3287) 		if (p->index == addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3288) 			*link = pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3289) 			goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3290) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3292) 	BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3294) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3295) 	return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3298) static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3300) 	unsigned int h = niu_hash_rxaddr(rp, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3302) 	page->index = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3303) 	page->mapping = (struct address_space *) rp->rxhash[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3304) 	rp->rxhash[h] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3306) 
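/* Allocate and DMA-map one page, enter it into the rxhash, and write
 * one RBR descriptor per rbr_block_size chunk of the page, starting
 * at @start_index.  The page's refcount is raised so that each block
 * effectively holds its own reference.
 */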
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3307) static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3308) 			    gfp_t mask, int start_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3310) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3311) 	u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3312) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3314) 	page = alloc_page(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3315) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3316) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3318) 	addr = np->ops->map_page(np->device, page, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3319) 				 PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3320) 	if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3321) 		__free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3322) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3323) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3325) 	niu_hash_page(rp, page, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3326) 	if (rp->rbr_blocks_per_page > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3327) 		page_ref_add(page, rp->rbr_blocks_per_page - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3329) 	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3330) 		__le32 *rbr = &rp->rbr[start_index + i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3332) 		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3333) 		addr += rp->rbr_block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3336) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3338) 
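/* Account one consumed buffer and, once a full page's worth is
 * pending, replenish it.  The RBR kick register is only written after
 * rbr_kick_thresh buffers have accumulated, batching the doorbells.
 */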
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3339) static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3341) 	int index = rp->rbr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3343) 	rp->rbr_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3344) 	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3345) 		int err = niu_rbr_add_page(np, rp, mask, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3347) 		if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3348) 			rp->rbr_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3349) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3350) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3352) 		rp->rbr_index += rp->rbr_blocks_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3353) 		BUG_ON(rp->rbr_index > rp->rbr_table_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3354) 		if (rp->rbr_index == rp->rbr_table_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3355) 			rp->rbr_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3357) 		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3358) 			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3359) 			rp->rbr_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3360) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3361) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3363) 
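/* Drop one packet: walk its RCR entries without building an skb,
 * releasing any page whose final block this packet consumed.
 * Returns the number of RCR entries consumed.
 */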
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3364) static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3366) 	unsigned int index = rp->rcr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3367) 	int num_rcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3369) 	rp->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3370) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3371) 		struct page *page, **link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3372) 		u64 addr, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3373) 		u32 rcr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3375) 		num_rcr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3377) 		val = le64_to_cpup(&rp->rcr[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3378) 		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3379) 			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3380) 		page = niu_find_rxpage(rp, addr, &link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3382) 		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3383) 					 RCR_ENTRY_PKTBUFSZ_SHIFT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3384) 		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3385) 			*link = (struct page *) page->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3386) 			np->ops->unmap_page(np->device, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3387) 					    PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3388) 			page->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3389) 			page->mapping = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3390) 			__free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3391) 			rp->rbr_refill_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3392) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3394) 		index = NEXT_RCR(rp, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3395) 		if (!(val & RCR_ENTRY_MULTI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3396) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3399) 	rp->rcr_index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3401) 	return num_rcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3403) 
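/* Build an skb for one packet, which may span several RCR entries
 * chained via RCR_ENTRY_MULTI.  The first entry determines the
 * checksum status from the hardware packet type; the last one trims
 * the append size to the true L2 length.  The driver-private
 * rx_pkt_hdr1 is then pulled off, optionally feeding RXHASH, before
 * the skb is handed to GRO.
 */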
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3404) static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3405) 			      struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3407) 	unsigned int index = rp->rcr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3408) 	struct rx_pkt_hdr1 *rh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3409) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3410) 	int len, num_rcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3412) 	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3413) 	if (unlikely(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3414) 		return niu_rx_pkt_ignore(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3416) 	num_rcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3417) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3418) 		struct page *page, **link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3419) 		u32 rcr_size, append_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3420) 		u64 addr, val, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3422) 		num_rcr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3424) 		val = le64_to_cpup(&rp->rcr[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3426) 		len = (val & RCR_ENTRY_L2_LEN) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3427) 			RCR_ENTRY_L2_LEN_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3428) 		append_size = len + ETH_HLEN + ETH_FCS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3430) 		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3431) 			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3432) 		page = niu_find_rxpage(rp, addr, &link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3434) 		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3435) 					 RCR_ENTRY_PKTBUFSZ_SHIFT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3437) 		off = addr & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3438) 		if (num_rcr == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3439) 			int ptype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3441) 			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3442) 			if ((ptype == RCR_PKT_TYPE_TCP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3443) 			     ptype == RCR_PKT_TYPE_UDP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3444) 			    !(val & (RCR_ENTRY_NOPORT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3445) 				     RCR_ENTRY_ERROR)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3446) 				skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3447) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3448) 				skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3449) 		} else if (!(val & RCR_ENTRY_MULTI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3450) 			append_size = append_size - skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3452) 		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3453) 		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3454) 			*link = (struct page *) page->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3455) 			np->ops->unmap_page(np->device, page->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3456) 					    PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3457) 			page->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3458) 			page->mapping = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3459) 			rp->rbr_refill_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3460) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3461) 			get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3463) 		index = NEXT_RCR(rp, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3464) 		if (!(val & RCR_ENTRY_MULTI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3465) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3467) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3468) 	rp->rcr_index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3470) 	len += sizeof(*rh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3471) 	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3472) 	__pskb_pull_tail(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3474) 	rh = (struct rx_pkt_hdr1 *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3475) 	if (np->dev->features & NETIF_F_RXHASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3476) 		skb_set_hash(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3477) 			     ((u32)rh->hashval2_0 << 24 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3478) 			      (u32)rh->hashval2_1 << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3479) 			      (u32)rh->hashval1_1 << 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3480) 			      (u32)rh->hashval1_2 << 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3481) 			     PKT_HASH_TYPE_L3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3482) 	skb_pull(skb, sizeof(*rh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3484) 	rp->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3485) 	rp->rx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3487) 	skb->protocol = eth_type_trans(skb, np->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3488) 	skb_record_rx_queue(skb, rp->rx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3489) 	napi_gro_receive(napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3491) 	return num_rcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3493) 
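/* Fill the RBR with pages from the current index up to the end of
 * the table, stopping early if an allocation fails.
 */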
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3494) static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3496) 	int blocks_per_page = rp->rbr_blocks_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3497) 	int err, index = rp->rbr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3499) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3500) 	while (index < (rp->rbr_table_size - blocks_per_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3501) 		err = niu_rbr_add_page(np, rp, mask, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3502) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3503) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3505) 		index += blocks_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3508) 	rp->rbr_index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3509) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3511) 
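/* Release every page still held in the rxhash and clear out the RBR
 * descriptor table.
 */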
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3512) static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3514) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3516) 	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3517) 		struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3519) 		page = rp->rxhash[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3520) 		while (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3521) 			struct page *next = (struct page *) page->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3522) 			u64 base = page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3524) 			np->ops->unmap_page(np->device, base, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3525) 					    DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3526) 			page->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3527) 			page->mapping = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3529) 			__free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3531) 			page = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3532) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3533) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3535) 	for (i = 0; i < rp->rbr_table_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3536) 		rp->rbr[i] = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3537) 	rp->rbr_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3539) 
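/* Unmap and free one completed TX skb, walking the descriptors used
 * by its linear area and its fragments.  Returns the consumer index
 * just past the packet.
 */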
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3540) static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3542) 	struct tx_buff_info *tb = &rp->tx_buffs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3543) 	struct sk_buff *skb = tb->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3544) 	struct tx_pkt_hdr *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3545) 	u64 tx_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3546) 	int i, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3548) 	tp = (struct tx_pkt_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3549) 	tx_flags = le64_to_cpup(&tp->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3551) 	rp->tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3552) 	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3553) 			 ((tx_flags & TXHDR_PAD) / 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3555) 	len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3556) 	np->ops->unmap_single(np->device, tb->mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3557) 			      len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3559) 	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3560) 		rp->mark_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3562) 	tb->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3563) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3564) 		idx = NEXT_TX(rp, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3565) 		len -= MAX_TX_DESC_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3566) 	} while (len > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3568) 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3569) 		tb = &rp->tx_buffs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3570) 		BUG_ON(tb->skb != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3571) 		np->ops->unmap_page(np->device, tb->mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3572) 				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3573) 				    DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3574) 		idx = NEXT_TX(rp, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3577) 	dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3579) 	return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3581) 
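/* Wake a stopped TX queue only once more than a quarter of the ring
 * is free again.
 */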
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3582) #define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3583) 
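/* Service TX completions for one ring.  The hardware packet counter
 * in TX_CS is a narrow free-running value, so the amount of completed
 * work is the difference from last_pkt_cnt masked to the counter
 * width.  The queue is re-woken under the tx lock, re-checking the
 * stop condition to avoid racing with the xmit path.
 */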
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3584) static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3586) 	struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3587) 	u16 pkt_cnt, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3588) 	int cons, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3589) 	u64 cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3591) 	index = (rp - np->tx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3592) 	txq = netdev_get_tx_queue(np->dev, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3594) 	cs = rp->tx_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3595) 	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3596) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3598) 	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3599) 	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3600) 		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3602) 	rp->last_pkt_cnt = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3604) 	cons = rp->cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3606) 	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3607) 		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3609) 	while (pkt_cnt--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3610) 		cons = release_tx_packet(np, rp, cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3612) 	rp->cons = cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3613) 	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3615) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3616) 	if (unlikely(netif_tx_queue_stopped(txq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3617) 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3618) 		__netif_tx_lock(txq, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3619) 		if (netif_tx_queue_stopped(txq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3620) 		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3621) 			netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3622) 		__netif_tx_unlock(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3626) static inline void niu_sync_rx_discard_stats(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3627) 					     struct rx_ring_info *rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3628) 					     const int limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3630) 	/* This elaborate scheme is needed for reading the RX discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3631) 	 * counters, as they are only 16-bit and can overflow quickly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3632) 	 * and because the overflow indication bit is not usable as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3633) 	 * the counter value does not wrap, but remains at max value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3634) 	 * 0xFFFF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3635) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3636) 	 * In theory and in practice counters can be lost in between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3637) 	 * reading nr64() and clearing the counter nw64().  For this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3638) 	 * reason, the number of nw64() counter clearings is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3639) 	 * limited/reduced through the limit parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3640) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3641) 	int rx_channel = rp->rx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3642) 	u32 misc, wred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3644) 	/* RXMISC (Receive Miscellaneous Discard Count) covers the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3645) 	 * following discard events: IPP (Input Port Process),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3646) 	 * FFLP/TCAM, a full RCR (Receive Completion Ring), and an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3647) 	 * empty RBR (Receive Block Ring) prefetch buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3648) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3649) 	misc = nr64(RXMISC(rx_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3650) 	if (unlikely((misc & RXMISC_COUNT) > limit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3651) 		nw64(RXMISC(rx_channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3652) 		rp->rx_errors += misc & RXMISC_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3654) 		if (unlikely(misc & RXMISC_OFLOW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3655) 			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3656) 				rx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3658) 		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3659) 			     "rx-%d: MISC drop=%u over=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3660) 			     rx_channel, misc, misc-limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3663) 	/* WRED (Weighted Random Early Discard) by hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3664) 	wred = nr64(RED_DIS_CNT(rx_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3665) 	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3666) 		nw64(RED_DIS_CNT(rx_channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3667) 		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3669) 		if (unlikely(wred & RED_DIS_CNT_OFLOW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3670) 			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3672) 		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3673) 			     "rx-%d: WRED drop=%u over=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3674) 			     rx_channel, wred, wred-limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3677) 
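/* One NAPI pass over a single RX channel: process at most @budget
 * packets, refill the RBR if enough buffers were consumed, and ack
 * the work done via RX_DMA_CTL_STAT.
 */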
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3678) static int niu_rx_work(struct napi_struct *napi, struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3679) 		       struct rx_ring_info *rp, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3681) 	int qlen, rcr_done = 0, work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3682) 	struct rxdma_mailbox *mbox = rp->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3683) 	u64 stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3684) 
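	/* Read the channel status and RCR queue length directly from
	 * the chip registers; the disabled #else variant would instead
	 * use the snapshot the hardware DMAs into the mailbox.
	 */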
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3685) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3686) 	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3687) 	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3688) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3689) 	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3690) 	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3691) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3692) 	mbox->rx_dma_ctl_stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3693) 	mbox->rcrstat_a = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3695) 	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3696) 		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3697) 		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3699) 	rcr_done = work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3700) 	qlen = min(qlen, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3701) 	while (work_done < qlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3702) 		rcr_done += niu_process_rx_pkt(napi, np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3703) 		work_done++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3706) 	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3707) 		unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3709) 		for (i = 0; i < rp->rbr_refill_pending; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3710) 			niu_rbr_refill(np, rp, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3711) 		rp->rbr_refill_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3714) 	stat = (RX_DMA_CTL_STAT_MEX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3715) 		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3716) 		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3718) 	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3720) 	/* Only sync discard stats when qlen indicates potential for drops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3721) 	if (qlen > 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3722) 		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3724) 	return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3726) 
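/* Core NAPI handler.  The logical-device state in v0 packs TX
 * indications into the upper 32 bits and RX indications into the
 * lower 32; each set bit selects a channel to service.  The per-LDN
 * interrupt masks are re-enabled (written as 0) on the way out.
 */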
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3727) static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3729) 	u64 v0 = lp->v0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3730) 	u32 tx_vec = (v0 >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3731) 	u32 rx_vec = (v0 & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3732) 	int i, work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3734) 	netif_printk(np, intr, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3735) 		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3737) 	for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3738) 		struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3739) 		if (tx_vec & (1 << rp->tx_channel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3740) 			niu_tx_work(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3741) 		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3742) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3744) 	for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3745) 		struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3747) 		if (rx_vec & (1 << rp->rx_channel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3748) 			int this_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3750) 			this_work_done = niu_rx_work(&lp->napi, np, rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3751) 						     budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3753) 			budget -= this_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3754) 			work_done += this_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3755) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3756) 		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3757) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3759) 	return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3762) static int niu_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3764) 	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3765) 	struct niu *np = lp->np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3766) 	int work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3768) 	work_done = niu_poll_core(np, lp, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3770) 	if (work_done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3771) 		napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3772) 		niu_ldg_rearm(np, lp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3773) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3774) 	return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3777) static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3778) 				  u64 stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3780) 	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3782) 	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3783) 		pr_cont("RBR_TMOUT ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3784) 	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3785) 		pr_cont("RSP_CNT ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3786) 	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3787) 		pr_cont("BYTE_EN_BUS ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3788) 	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3789) 		pr_cont("RSP_DAT ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3790) 	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3791) 		pr_cont("RCR_ACK ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3792) 	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3793) 		pr_cont("RCR_SHA_PAR ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3794) 	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3795) 		pr_cont("RBR_PRE_PAR ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3796) 	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3797) 		pr_cont("CONFIG ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3798) 	if (stat & RX_DMA_CTL_STAT_RCRINCON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3799) 		pr_cont("RCRINCON ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3800) 	if (stat & RX_DMA_CTL_STAT_RCRFULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3801) 		pr_cont("RCRFULL ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3802) 	if (stat & RX_DMA_CTL_STAT_RBRFULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3803) 		pr_cont("RBRFULL ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3804) 	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3805) 		pr_cont("RBRLOGPAGE ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3806) 	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3807) 		pr_cont("CFIGLOGPAGE ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3808) 	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3809) 		pr_cont("DC_FIFO ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3811) 	pr_cont(")\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3813) 
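/* Only CHAN_FATAL/PORT_FATAL conditions are treated as hard errors
 * and logged; in all cases the write-1-to-clear error bits are acked.
 */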
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3814) static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3816) 	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3817) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3820) 	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3821) 		    RX_DMA_CTL_STAT_PORT_FATAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3822) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3824) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3825) 		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3826) 			   rp->rx_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3827) 			   (unsigned long long) stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3829) 		niu_log_rxchan_errors(np, rp, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3832) 	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3833) 	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3835) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3838) static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3839) 				  u64 cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3841) 	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3843) 	if (cs & TX_CS_MBOX_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3844) 		pr_cont("MBOX ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3845) 	if (cs & TX_CS_PKT_SIZE_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3846) 		pr_cont("PKT_SIZE ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3847) 	if (cs & TX_CS_TX_RING_OFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3848) 		pr_cont("TX_RING_OFLOW ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3849) 	if (cs & TX_CS_PREF_BUF_PAR_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3850) 		pr_cont("PREF_BUF_PAR ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3851) 	if (cs & TX_CS_NACK_PREF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3852) 		pr_cont("NACK_PREF ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3853) 	if (cs & TX_CS_NACK_PKT_RD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3854) 		pr_cont("NACK_PKT_RD ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3855) 	if (cs & TX_CS_CONF_PART_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3856) 		pr_cont("CONF_PART ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3857) 	if (cs & TX_CS_PKT_PRT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3858) 		pr_cont("PKT_PTR ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3860) 	pr_cont(")\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3863) static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3865) 	u64 cs, logh, logl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3867) 	cs = nr64(TX_CS(rp->tx_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3868) 	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3869) 	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3871) 	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3872) 		   rp->tx_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3873) 		   (unsigned long long)cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3874) 		   (unsigned long long)logh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3875) 		   (unsigned long long)logl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3877) 	niu_log_txchan_errors(np, rp, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3879) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3882) static int niu_mif_interrupt(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3884) 	u64 mif_status = nr64(MIF_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3885) 	int phy_mdint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3887) 	if (np->flags & NIU_FLAGS_XMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3888) 		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3890) 		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3891) 			phy_mdint = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3894) 	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3895) 		   (unsigned long long)mif_status, phy_mdint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3897) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3899) 
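/* The XMAC's hardware statistics counters raise a status bit when
 * they expire (wrap); fold each expired counter's full range into the
 * software mac_stats here.
 */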
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3900) static void niu_xmac_interrupt(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3902) 	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3903) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3905) 	val = nr64_mac(XTXMAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3906) 	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3907) 		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3908) 	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3909) 		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3910) 	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3911) 		mp->tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3912) 	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3913) 		mp->tx_overflow_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3914) 	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3915) 		mp->tx_max_pkt_size_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3916) 	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3917) 		mp->tx_underflow_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3919) 	val = nr64_mac(XRXMAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3920) 	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3921) 		mp->rx_local_faults++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3922) 	if (val & XRXMAC_STATUS_RFLT_DET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3923) 		mp->rx_remote_faults++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3924) 	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3925) 		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3926) 	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3927) 		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3928) 	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3929) 		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3930) 	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3931) 		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3932) 	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3933) 		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3934) 	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3935) 		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3936) 	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3937) 		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3938) 	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3939) 		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3940) 	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3941) 		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3942) 	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3943) 		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3944) 	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3945) 		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3946) 	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3947) 		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3948) 	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3949) 		mp->rx_octets += RXMAC_BT_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3950) 	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3951) 		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3952) 	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3953) 		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3954) 	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3955) 		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3956) 	if (val & XRXMAC_STATUS_RXUFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3957) 		mp->rx_underflows++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3958) 	if (val & XRXMAC_STATUS_RXOFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3959) 		mp->rx_overflows++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3961) 	val = nr64_mac(XMAC_FC_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3962) 	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3963) 		mp->pause_off_state++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3964) 	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3965) 		mp->pause_on_state++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3966) 	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3967) 		mp->pause_received++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3970) static void niu_bmac_interrupt(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3972) 	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3973) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3975) 	val = nr64_mac(BTXMAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3976) 	if (val & BTXMAC_STATUS_UNDERRUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3977) 		mp->tx_underflow_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3978) 	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3979) 		mp->tx_max_pkt_size_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3980) 	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3981) 		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3982) 	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3983) 		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3985) 	val = nr64_mac(BRXMAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3986) 	if (val & BRXMAC_STATUS_OVERFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3987) 		mp->rx_overflows++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3988) 	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3989) 		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3990) 	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3991) 		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3992) 	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3993) 		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3994) 	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3995) 		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3997) 	val = nr64_mac(BMAC_CTRL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3998) 	if (val & BMAC_CTRL_STATUS_NOPAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3999) 		mp->pause_off_state++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4000) 	if (val & BMAC_CTRL_STATUS_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4001) 		mp->pause_on_state++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4002) 	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4003) 		mp->pause_received++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4006) static int niu_mac_interrupt(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4008) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4009) 		niu_xmac_interrupt(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4010) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4011) 		niu_bmac_interrupt(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4013) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4016) static void niu_log_device_error(struct niu *np, u64 stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4018) 	netdev_err(np->dev, "Core device errors ( ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4020) 	if (stat & SYS_ERR_MASK_META2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4021) 		pr_cont("META2 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4022) 	if (stat & SYS_ERR_MASK_META1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4023) 		pr_cont("META1 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4024) 	if (stat & SYS_ERR_MASK_PEU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4025) 		pr_cont("PEU ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4026) 	if (stat & SYS_ERR_MASK_TXC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4027) 		pr_cont("TXC ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4028) 	if (stat & SYS_ERR_MASK_RDMC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4029) 		pr_cont("RDMC ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4030) 	if (stat & SYS_ERR_MASK_TDMC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4031) 		pr_cont("TDMC ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4032) 	if (stat & SYS_ERR_MASK_ZCP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4033) 		pr_cont("ZCP ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4034) 	if (stat & SYS_ERR_MASK_FFLP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4035) 		pr_cont("FFLP ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4036) 	if (stat & SYS_ERR_MASK_IPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4037) 		pr_cont("IPP ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4038) 	if (stat & SYS_ERR_MASK_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4039) 		pr_cont("MAC ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4040) 	if (stat & SYS_ERR_MASK_SMX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4041) 		pr_cont("SMX ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4043) 	pr_cont(")\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4046) static int niu_device_error(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4048) 	u64 stat = nr64(SYS_ERR_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4050) 	netdev_err(np->dev, "Core device error, stat[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4051) 		   (unsigned long long)stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4053) 	niu_log_device_error(np, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4055) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4058) static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4059) 			      u64 v0, u64 v1, u64 v2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4062) 	int i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4064) 	lp->v0 = v0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4065) 	lp->v1 = v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4066) 	lp->v2 = v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4068) 	if (v1 & 0x00000000ffffffffULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4069) 		u32 rx_vec = (v1 & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4071) 		for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4072) 			struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4074) 			if (rx_vec & (1 << rp->rx_channel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4075) 				int r = niu_rx_error(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4076) 				if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4077) 					err = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4078) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4079) 					if (!v0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4080) 						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4081) 						     RX_DMA_CTL_STAT_MEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4082) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4083) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4084) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4086) 	if (v1 & 0x7fffffff00000000ULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4087) 		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4089) 		for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4090) 			struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4092) 			if (tx_vec & (1 << rp->tx_channel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4093) 				int r = niu_tx_error(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4094) 				if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4095) 					err = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4096) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4097) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4098) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4099) 	if ((v0 | v1) & 0x8000000000000000ULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4100) 		int r = niu_mif_interrupt(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4101) 		if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4102) 			err = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4103) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4104) 	if (v2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4105) 		if (v2 & 0x01ef) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4106) 			int r = niu_mac_interrupt(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4107) 			if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4108) 				err = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4109) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4110) 		if (v2 & 0x0210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4111) 			int r = niu_device_error(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4112) 			if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4113) 				err = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4114) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4115) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4117) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4118) 		niu_enable_interrupts(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4120) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4123) static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4124) 			    int ldn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4126) 	struct rxdma_mailbox *mbox = rp->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4127) 	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4129) 	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4130) 		      RX_DMA_CTL_STAT_RCRTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4131) 	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4133) 	netif_printk(np, intr, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4134) 		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4137) static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4138) 			    int ldn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4140) 	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4142) 	netif_printk(np, intr, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4143) 		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4146) static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4148) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4149) 	u32 rx_vec, tx_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4150) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4152) 	tx_vec = (v0 >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4153) 	rx_vec = (v0 & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4155) 	for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4156) 		struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4157) 		int ldn = LDN_RXDMA(rp->rx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4159) 		if (parent->ldg_map[ldn] != ldg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4160) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4162) 		nw64(LD_IM0(ldn), LD_IM0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4163) 		if (rx_vec & (1 << rp->rx_channel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4164) 			niu_rxchan_intr(np, rp, ldn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4167) 	for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4168) 		struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4169) 		int ldn = LDN_TXDMA(rp->tx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4171) 		if (parent->ldg_map[ldn] != ldg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4172) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4174) 		nw64(LD_IM0(ldn), LD_IM0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4175) 		if (tx_vec & (1 << rp->tx_channel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4176) 			niu_txchan_intr(np, rp, ldn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4180) static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4181) 			      u64 v0, u64 v1, u64 v2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4183) 	if (likely(napi_schedule_prep(&lp->napi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4184) 		lp->v0 = v0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4185) 		lp->v1 = v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4186) 		lp->v2 = v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4187) 		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4188) 		__napi_schedule(&lp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4192) static irqreturn_t niu_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4194) 	struct niu_ldg *lp = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4195) 	struct niu *np = lp->np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4196) 	int ldg = lp->ldg_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4197) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4198) 	u64 v0, v1, v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4200) 	if (netif_msg_intr(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4201) 		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4202) 		       __func__, lp, ldg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4204) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4206) 	v0 = nr64(LDSV0(ldg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4207) 	v1 = nr64(LDSV1(ldg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4208) 	v2 = nr64(LDSV2(ldg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4210) 	if (netif_msg_intr(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4211) 		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4212) 		       (unsigned long long) v0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4213) 		       (unsigned long long) v1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4214) 		       (unsigned long long) v2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4216) 	if (unlikely(!v0 && !v1 && !v2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4217) 		spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4218) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4221) 	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4222) 		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4223) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4224) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4226) 	if (likely(v0 & ~((u64)1 << LDN_MIF)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4227) 		niu_schedule_napi(np, lp, v0, v1, v2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4228) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4229) 		niu_ldg_rearm(np, lp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4230) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4231) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4233) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4236) static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4238) 	if (rp->mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4239) 		np->ops->free_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4240) 				       sizeof(struct rxdma_mailbox),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4241) 				       rp->mbox, rp->mbox_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4242) 		rp->mbox = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4244) 	if (rp->rcr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4245) 		np->ops->free_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4246) 				       MAX_RCR_RING_SIZE * sizeof(__le64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4247) 				       rp->rcr, rp->rcr_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4248) 		rp->rcr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4249) 		rp->rcr_table_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4250) 		rp->rcr_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4251) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4252) 	if (rp->rbr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4253) 		niu_rbr_free(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4255) 		np->ops->free_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4256) 				       MAX_RBR_RING_SIZE * sizeof(__le32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4257) 				       rp->rbr, rp->rbr_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4258) 		rp->rbr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4259) 		rp->rbr_table_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4260) 		rp->rbr_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4261) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4262) 	kfree(rp->rxhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4263) 	rp->rxhash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4266) static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4268) 	if (rp->mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4269) 		np->ops->free_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4270) 				       sizeof(struct txdma_mailbox),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4271) 				       rp->mbox, rp->mbox_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4272) 		rp->mbox = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4274) 	if (rp->descr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4275) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4277) 		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4278) 			if (rp->tx_buffs[i].skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4279) 				(void) release_tx_packet(np, rp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4280) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4282) 		np->ops->free_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4283) 				       MAX_TX_RING_SIZE * sizeof(__le64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4284) 				       rp->descr, rp->descr_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4285) 		rp->descr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4286) 		rp->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4287) 		rp->prod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4288) 		rp->cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4289) 		rp->wrap_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4290) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4293) static void niu_free_channels(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4295) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4297) 	if (np->rx_rings) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4298) 		for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4299) 			struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4301) 			niu_free_rx_ring_info(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4302) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4303) 		kfree(np->rx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4304) 		np->rx_rings = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4305) 		np->num_rx_rings = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4308) 	if (np->tx_rings) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4309) 		for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4310) 			struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4312) 			niu_free_tx_ring_info(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4313) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4314) 		kfree(np->tx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4315) 		np->tx_rings = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4316) 		np->num_tx_rings = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4320) static int niu_alloc_rx_ring_info(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4321) 				  struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4323) 	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4325) 	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4326) 			     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4327) 	if (!rp->rxhash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4328) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4330) 	rp->mbox = np->ops->alloc_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4331) 					   sizeof(struct rxdma_mailbox),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4332) 					   &rp->mbox_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4333) 	if (!rp->mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4334) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4335) 	if ((unsigned long)rp->mbox & (64UL - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4336) 		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4337) 			   rp->mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4338) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4341) 	rp->rcr = np->ops->alloc_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4342) 					  MAX_RCR_RING_SIZE * sizeof(__le64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4343) 					  &rp->rcr_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4344) 	if (!rp->rcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4345) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4346) 	if ((unsigned long)rp->rcr & (64UL - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4347) 		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4348) 			   rp->rcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4349) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4351) 	rp->rcr_table_size = MAX_RCR_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4352) 	rp->rcr_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4354) 	rp->rbr = np->ops->alloc_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4355) 					  MAX_RBR_RING_SIZE * sizeof(__le32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4356) 					  &rp->rbr_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4357) 	if (!rp->rbr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4358) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4359) 	if ((unsigned long)rp->rbr & (64UL - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4360) 		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4361) 			   rp->rbr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4362) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4363) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4364) 	rp->rbr_table_size = MAX_RBR_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4365) 	rp->rbr_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4366) 	rp->rbr_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4368) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4371) static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4373) 	int mtu = np->dev->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4375) 	/* These values are recommended by the HW designers for fair
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4376) 	 * utilization of DRR amongst the rings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4377) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4378) 	rp->max_burst = mtu + 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4379) 	if (rp->max_burst > 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4380) 		rp->max_burst = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4383) static int niu_alloc_tx_ring_info(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4384) 				  struct tx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4386) 	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4388) 	rp->mbox = np->ops->alloc_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4389) 					   sizeof(struct txdma_mailbox),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4390) 					   &rp->mbox_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4391) 	if (!rp->mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4392) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4393) 	if ((unsigned long)rp->mbox & (64UL - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4394) 		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4395) 			   rp->mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4396) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4397) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4399) 	rp->descr = np->ops->alloc_coherent(np->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4400) 					    MAX_TX_RING_SIZE * sizeof(__le64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4401) 					    &rp->descr_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4402) 	if (!rp->descr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4403) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4404) 	if ((unsigned long)rp->descr & (64UL - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4405) 		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4406) 			   rp->descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4407) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4410) 	rp->pending = MAX_TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4411) 	rp->prod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4412) 	rp->cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4413) 	rp->wrap_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4415) 	/* XXX make these configurable... XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4416) 	rp->mark_freq = rp->pending / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4418) 	niu_set_max_burst(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4420) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4423) static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4425) 	u16 bss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4427) 	bss = min(PAGE_SHIFT, 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4429) 	rp->rbr_block_size = 1 << bss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4430) 	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4432) 	rp->rbr_sizes[0] = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4433) 	rp->rbr_sizes[1] = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4434) 	if (np->dev->mtu > ETH_DATA_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4435) 		switch (PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4436) 		case 4 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4437) 			rp->rbr_sizes[2] = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4438) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4440) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4441) 			rp->rbr_sizes[2] = 8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4442) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4443) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4444) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4445) 		rp->rbr_sizes[2] = 2048;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4447) 	rp->rbr_sizes[3] = rp->rbr_block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4450) static int niu_alloc_channels(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4452) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4453) 	int first_rx_channel, first_tx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4454) 	int num_rx_rings, num_tx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4455) 	struct rx_ring_info *rx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4456) 	struct tx_ring_info *tx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4457) 	int i, port, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4459) 	port = np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4460) 	first_rx_channel = first_tx_channel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4461) 	for (i = 0; i < port; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4462) 		first_rx_channel += parent->rxchan_per_port[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4463) 		first_tx_channel += parent->txchan_per_port[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4466) 	num_rx_rings = parent->rxchan_per_port[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4467) 	num_tx_rings = parent->txchan_per_port[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4469) 	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4470) 			   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4471) 	err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4472) 	if (!rx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4473) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4475) 	np->num_rx_rings = num_rx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4476) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4477) 	np->rx_rings = rx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4479) 	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4481) 	for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4482) 		struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4484) 		rp->np = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4485) 		rp->rx_channel = first_rx_channel + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4487) 		err = niu_alloc_rx_ring_info(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4488) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4489) 			goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4491) 		niu_size_rbr(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4493) 		/* XXX better defaults, configurable, etc... XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4494) 		rp->nonsyn_window = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4495) 		rp->nonsyn_threshold = rp->rcr_table_size - 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4496) 		rp->syn_window = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4497) 		rp->syn_threshold = rp->rcr_table_size - 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4498) 		rp->rcr_pkt_threshold = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4499) 		rp->rcr_timeout = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4500) 		rp->rbr_kick_thresh = RBR_REFILL_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4501) 		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4502) 			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4504) 		err = niu_rbr_fill(np, rp, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4505) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4506) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4509) 	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4510) 			   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4511) 	err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4512) 	if (!tx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4513) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4515) 	np->num_tx_rings = num_tx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4516) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4517) 	np->tx_rings = tx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4519) 	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4521) 	for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4522) 		struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4524) 		rp->np = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4525) 		rp->tx_channel = first_tx_channel + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4527) 		err = niu_alloc_tx_ring_info(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4528) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4529) 			goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4532) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4534) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4535) 	niu_free_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4536) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4539) static int niu_tx_cs_sng_poll(struct niu *np, int channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4541) 	int limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4543) 	while (--limit > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4544) 		u64 val = nr64(TX_CS(channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4545) 		if (val & TX_CS_SNG_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4546) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4548) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4551) static int niu_tx_channel_stop(struct niu *np, int channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4553) 	u64 val = nr64(TX_CS(channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4555) 	val |= TX_CS_STOP_N_GO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4556) 	nw64(TX_CS(channel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4558) 	return niu_tx_cs_sng_poll(np, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4561) static int niu_tx_cs_reset_poll(struct niu *np, int channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4563) 	int limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4565) 	while (--limit > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4566) 		u64 val = nr64(TX_CS(channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4567) 		if (!(val & TX_CS_RST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4568) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4569) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4570) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4573) static int niu_tx_channel_reset(struct niu *np, int channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4575) 	u64 val = nr64(TX_CS(channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4576) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4578) 	val |= TX_CS_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4579) 	nw64(TX_CS(channel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4581) 	err = niu_tx_cs_reset_poll(np, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4582) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4583) 		nw64(TX_RING_KICK(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4585) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4588) static int niu_tx_channel_lpage_init(struct niu *np, int channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4590) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4592) 	nw64(TX_LOG_MASK1(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4593) 	nw64(TX_LOG_VAL1(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4594) 	nw64(TX_LOG_MASK2(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4595) 	nw64(TX_LOG_VAL2(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4596) 	nw64(TX_LOG_PAGE_RELO1(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4597) 	nw64(TX_LOG_PAGE_RELO2(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4598) 	nw64(TX_LOG_PAGE_HDL(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4600) 	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4601) 	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4602) 	nw64(TX_LOG_PAGE_VLD(channel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4604) 	/* XXX TXDMA 32bit mode? XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4606) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4609) static void niu_txc_enable_port(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4611) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4612) 	u64 val, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4614) 	niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4615) 	val = nr64(TXC_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4616) 	mask = (u64)1 << np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4617) 	if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4618) 		val |= TXC_CONTROL_ENABLE | mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4619) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4620) 		val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4621) 		if ((val & ~TXC_CONTROL_ENABLE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4622) 			val &= ~TXC_CONTROL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4624) 	nw64(TXC_CONTROL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4625) 	niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4628) static void niu_txc_set_imask(struct niu *np, u64 imask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4630) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4631) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4633) 	niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4634) 	val = nr64(TXC_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4635) 	val &= ~TXC_INT_MASK_VAL(np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4636) 	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4637) 	niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4640) static void niu_txc_port_dma_enable(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4642) 	u64 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4644) 	if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4645) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4647) 		for (i = 0; i < np->num_tx_rings; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4648) 			val |= (1 << np->tx_rings[i].tx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4650) 	nw64(TXC_PORT_DMA(np->port), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4653) static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4655) 	int err, channel = rp->tx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4656) 	u64 val, ring_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4658) 	err = niu_tx_channel_stop(np, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4659) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4660) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4662) 	err = niu_tx_channel_reset(np, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4663) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4664) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4666) 	err = niu_tx_channel_lpage_init(np, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4667) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4668) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4670) 	nw64(TXC_DMA_MAX(channel), rp->max_burst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4671) 	nw64(TX_ENT_MSK(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4673) 	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4674) 			      TX_RNG_CFIG_STADDR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4675) 		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4676) 			   channel, (unsigned long long)rp->descr_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4677) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4680) 	/* The length field in TX_RNG_CFIG is measured in 64-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4681) 	 * blocks.  rp->pending is the number of TX descriptors in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4682) 	 * our ring, 8 bytes each, thus we divide by 8 bytes more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4683) 	 * to get the proper value the chip wants.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4684) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4685) 	ring_len = (rp->pending / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4687) 	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4688) 	       rp->descr_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4689) 	nw64(TX_RNG_CFIG(channel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4691) 	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4692) 	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4693) 		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4694) 			    channel, (unsigned long long)rp->mbox_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4695) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4696) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4697) 	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4698) 	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4700) 	nw64(TX_CS(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4702) 	rp->last_pkt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4704) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4707) static void niu_init_rdc_groups(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4709) 	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4710) 	int i, first_table_num = tp->first_table_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4712) 	for (i = 0; i < tp->num_tables; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4713) 		struct rdc_table *tbl = &tp->tables[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4714) 		int this_table = first_table_num + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4715) 		int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4717) 		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4718) 			nw64(RDC_TBL(this_table, slot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4719) 			     tbl->rxdma_channel[slot]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4722) 	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4725) static void niu_init_drr_weight(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4727) 	int type = phy_decode(np->parent->port_phy, np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4728) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4730) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4731) 	case PORT_TYPE_10G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4732) 		val = PT_DRR_WEIGHT_DEFAULT_10G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4733) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4735) 	case PORT_TYPE_1G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4736) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4737) 		val = PT_DRR_WEIGHT_DEFAULT_1G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4738) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4740) 	nw64(PT_DRR_WT(np->port), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4743) static int niu_init_hostinfo(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4745) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4746) 	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4747) 	int i, err, num_alt = niu_num_alt_addr(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4748) 	int first_rdc_table = tp->first_table_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4750) 	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4751) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4752) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4754) 	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4755) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4756) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4758) 	for (i = 0; i < num_alt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4759) 		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4760) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4761) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4762) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4764) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4767) static int niu_rx_channel_reset(struct niu *np, int channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4769) 	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4770) 				      RXDMA_CFIG1_RST, 1000, 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4771) 				      "RXDMA_CFIG1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4774) static int niu_rx_channel_lpage_init(struct niu *np, int channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4776) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4778) 	nw64(RX_LOG_MASK1(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4779) 	nw64(RX_LOG_VAL1(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4780) 	nw64(RX_LOG_MASK2(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4781) 	nw64(RX_LOG_VAL2(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4782) 	nw64(RX_LOG_PAGE_RELO1(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4783) 	nw64(RX_LOG_PAGE_RELO2(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4784) 	nw64(RX_LOG_PAGE_HDL(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4786) 	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4787) 	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4788) 	nw64(RX_LOG_PAGE_VLD(channel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4790) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4793) static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4795) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4797) 	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4798) 	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4799) 	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4800) 	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4801) 	nw64(RDC_RED_PARA(rp->rx_channel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4804) static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4806) 	u64 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4808) 	*ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4809) 	switch (rp->rbr_block_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4810) 	case 4 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4811) 		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4812) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4813) 	case 8 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4814) 		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4815) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4816) 	case 16 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4817) 		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4818) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4819) 	case 32 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4820) 		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4821) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4822) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4823) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4824) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4825) 	val |= RBR_CFIG_B_VLD2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4826) 	switch (rp->rbr_sizes[2]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4827) 	case 2 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4828) 		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4829) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4830) 	case 4 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4831) 		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4832) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4833) 	case 8 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4834) 		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4835) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4836) 	case 16 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4837) 		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4838) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4840) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4841) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4843) 	val |= RBR_CFIG_B_VLD1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4844) 	switch (rp->rbr_sizes[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4845) 	case 1 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4846) 		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4847) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4848) 	case 2 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4849) 		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4850) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4851) 	case 4 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4852) 		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4853) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4854) 	case 8 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4855) 		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4856) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4858) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4859) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4861) 	val |= RBR_CFIG_B_VLD0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4862) 	switch (rp->rbr_sizes[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4863) 	case 256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4864) 		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4865) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4866) 	case 512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4867) 		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4868) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4869) 	case 1 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4870) 		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4871) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4872) 	case 2 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4873) 		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4874) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4876) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4877) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4880) 	*ret = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4881) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4883) 
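/* Flip the enable bit of an RX channel and wait for the DMA engine to
 * acknowledge.  The QST bit is polled in both directions, which reads
 * like a wait for the channel's state machine to reach its quiescent
 * state; if it never does within roughly 10ms (1000 * 10us) the
 * channel is considered dead and -ENODEV is returned.
 */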
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4884) static int niu_enable_rx_channel(struct niu *np, int channel, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4886) 	u64 val = nr64(RXDMA_CFIG1(channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4887) 	int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4889) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4890) 		val |= RXDMA_CFIG1_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4891) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4892) 		val &= ~RXDMA_CFIG1_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4893) 	nw64(RXDMA_CFIG1(channel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4895) 	limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4896) 	while (--limit > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4897) 		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4898) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4899) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4901) 	if (limit <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4902) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4903) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4905) 
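/* Bring up a single RX channel end to end: reset it, set up logical
 * pages and WRED, program the event mask and control/status bits, hand
 * the hardware the mailbox DMA address (high bits via RXDMA_CFIG1, low
 * bits plus full-header mode via RXDMA_CFIG2), describe the RBR and RCR
 * rings (base address, length, and for the RCR the packet-threshold and
 * timeout interrupt pacing), enable the channel, and finally kick the
 * RBR with the current index so the hardware sees the initial buffers.
 */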
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4906) static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4908) 	int err, channel = rp->rx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4909) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4911) 	err = niu_rx_channel_reset(np, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4912) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4913) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4915) 	err = niu_rx_channel_lpage_init(np, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4916) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4917) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4919) 	niu_rx_channel_wred_init(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4921) 	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4922) 	nw64(RX_DMA_CTL_STAT(channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4923) 	     (RX_DMA_CTL_STAT_MEX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4924) 	      RX_DMA_CTL_STAT_RCRTHRES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4925) 	      RX_DMA_CTL_STAT_RCRTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4926) 	      RX_DMA_CTL_STAT_RBR_EMPTY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4927) 	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4928) 	nw64(RXDMA_CFIG2(channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4929) 	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4930) 	      RXDMA_CFIG2_FULL_HDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4931) 	nw64(RBR_CFIG_A(channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4932) 	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4933) 	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4934) 	err = niu_compute_rbr_cfig_b(rp, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4935) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4936) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4937) 	nw64(RBR_CFIG_B(channel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4938) 	nw64(RCRCFIG_A(channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4939) 	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4940) 	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4941) 	nw64(RCRCFIG_B(channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4942) 	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4943) 	     RCRCFIG_B_ENTOUT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4944) 	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4946) 	err = niu_enable_rx_channel(np, channel, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4947) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4948) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4950) 	nw64(RBR_KICK(channel), rp->rbr_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4952) 	val = nr64(RX_DMA_CTL_STAT(channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4953) 	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4954) 	nw64(RX_DMA_CTL_STAT(channel), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4956) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4958) 
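/* Initialize all RX channels for this port.  The RED random-number
 * generator is seeded from jiffies_64 (only the bits that fit in
 * RED_RAN_INIT_VAL are used) and the RXDMA clock divider comes from the
 * shared parent configuration, so both writes happen under the parent
 * lock.  After the RDC groups, DRR weights and host-info entries are
 * set up, each ring is initialized individually.
 */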
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4959) static int niu_init_rx_channels(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4961) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4962) 	u64 seed = jiffies_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4963) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4965) 	niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4966) 	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4967) 	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4968) 	niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4970) 	/* XXX RXDMA 32bit mode? XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4972) 	niu_init_rdc_groups(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4973) 	niu_init_drr_weight(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4975) 	err = niu_init_hostinfo(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4976) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4977) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4979) 	for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4980) 		struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4982) 		err = niu_init_one_rx_channel(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4983) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4984) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4987) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4989) 
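/* Install the catch-all TCAM rule for IP fragments.  A fragment has no
 * TCP/UDP header to classify on, so the rule matches on the NOPORT bit
 * and its association data requests a fixed RDC offset of zero
 * (TCAM_ASSOCDATA_TRES_USE_OFFSET), presumably so fragmented flows
 * still land on a predictable receive channel.
 */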
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4990) static int niu_set_ip_frag_rule(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4992) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4993) 	struct niu_classifier *cp = &np->clas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4994) 	struct niu_tcam_entry *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4995) 	int index, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4997) 	index = cp->tcam_top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4998) 	tp = &parent->tcam[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5000) 	/* Note that the noport bit is the same in both ipv4 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5001) 	 * ipv6 format TCAM entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5002) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5003) 	memset(tp, 0, sizeof(*tp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5004) 	tp->key[1] = TCAM_V4KEY1_NOPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5005) 	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5006) 	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5007) 			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5008) 	err = tcam_write(np, index, tp->key, tp->key_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5009) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5010) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5011) 	err = tcam_assoc_write(np, index, tp->assoc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5012) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5013) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5014) 	tp->valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5015) 	cp->tcam_valid_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5017) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5019) 
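/* Program the whole classifier: hash polynomials H1/H2, host-info
 * entries, the VLAN-to-RDC table, any alternate-MAC-to-RDC mappings,
 * the per-class TCAM and flow keys for every class code from USER_PROG1
 * through SCTP_IPV6, and the IP-fragment rule above.  Only once
 * everything is in place is the TCAM lookup actually enabled.
 */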
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5020) static int niu_init_classifier_hw(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5022) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5023) 	struct niu_classifier *cp = &np->clas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5024) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5026) 	nw64(H1POLY, cp->h1_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5027) 	nw64(H2POLY, cp->h2_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5029) 	err = niu_init_hostinfo(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5030) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5031) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5033) 	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5034) 		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5036) 		vlan_tbl_write(np, i, np->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5037) 			       vp->vlan_pref, vp->rdc_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5038) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5040) 	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5041) 		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5043) 		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5044) 						ap->rdc_num, ap->mac_pref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5045) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5046) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5047) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5049) 	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5050) 		int index = i - CLASS_CODE_USER_PROG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5052) 		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5053) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5054) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5055) 		err = niu_set_flow_key(np, i, parent->flow_key[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5056) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5057) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5060) 	err = niu_set_ip_frag_rule(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5061) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5062) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5064) 	tcam_enable(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5066) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5068) 
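/* PIO access to the ZCP RAM.  A write deposits five 64-bit data words
 * plus a byte-enable mask and then triggers the access via ZCP_RAM_ACC
 * with the port's CFIFO selected; completion is signalled by the BUSY
 * bit clearing.  The read side below uses the same busy handshake both
 * before and after issuing the read.
 */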
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5069) static int niu_zcp_write(struct niu *np, int index, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5071) 	nw64(ZCP_RAM_DATA0, data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5072) 	nw64(ZCP_RAM_DATA1, data[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5073) 	nw64(ZCP_RAM_DATA2, data[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5074) 	nw64(ZCP_RAM_DATA3, data[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5075) 	nw64(ZCP_RAM_DATA4, data[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5076) 	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5077) 	nw64(ZCP_RAM_ACC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5078) 	     (ZCP_RAM_ACC_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5079) 	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5080) 	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5082) 	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5083) 				   1000, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5086) static int niu_zcp_read(struct niu *np, int index, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5088) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5090) 	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5091) 				  1000, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5092) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5093) 		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5094) 			   (unsigned long long)nr64(ZCP_RAM_ACC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5095) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5096) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5098) 	nw64(ZCP_RAM_ACC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5099) 	     (ZCP_RAM_ACC_READ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5100) 	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5101) 	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5103) 	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5104) 				  1000, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5105) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5106) 		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5107) 			   (unsigned long long)nr64(ZCP_RAM_ACC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5108) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5111) 	data[0] = nr64(ZCP_RAM_DATA0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5112) 	data[1] = nr64(ZCP_RAM_DATA1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5113) 	data[2] = nr64(ZCP_RAM_DATA2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5114) 	data[3] = nr64(ZCP_RAM_DATA3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5115) 	data[4] = nr64(ZCP_RAM_DATA4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5117) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5120) static void niu_zcp_cfifo_reset(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5122) 	u64 val = nr64(RESET_CFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5124) 	val |= RESET_CFIFO_RST(np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5125) 	nw64(RESET_CFIFO, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5126) 	udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5128) 	val &= ~RESET_CFIFO_RST(np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5129) 	nw64(RESET_CFIFO, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5131) 
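/* Zero-fill the port's CFIFO through the ZCP PIO window (the entry
 * count differs between the Atlas port pairs and a true NIU), then
 * reset the CFIFO, clear the ECC and latched interrupt status, and set
 * the ZCP interrupt mask.  The read-back of each entry doubles as a
 * completion check, since niu_zcp_read() waits on the busy handshake.
 */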
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5132) static int niu_init_zcp(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5134) 	u64 data[5], rbuf[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5135) 	int i, max, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5137) 	if (np->parent->plat_type != PLAT_TYPE_NIU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5138) 		if (np->port == 0 || np->port == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5139) 			max = ATLAS_P0_P1_CFIFO_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5140) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5141) 			max = ATLAS_P2_P3_CFIFO_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5142) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5143) 		max = NIU_CFIFO_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5145) 	data[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5146) 	data[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5147) 	data[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5148) 	data[3] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5149) 	data[4] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5151) 	for (i = 0; i < max; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5152) 		err = niu_zcp_write(np, i, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5153) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5154) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5155) 		err = niu_zcp_read(np, i, rbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5156) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5157) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5160) 	niu_zcp_cfifo_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5161) 	nw64(CFIFO_ECC(np->port), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5162) 	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5163) 	(void) nr64(ZCP_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5164) 	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5166) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5168) 
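/* PIO access to the IPP DFIFO: IPP_CFIG_DFIFO_PIO_W opens the write
 * window, a pointer write selects the entry, and five 64-bit data words
 * follow.  Reads just set the read pointer and pull the five words back
 * out; no busy handshake appears to be needed here.
 */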
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5169) static void niu_ipp_write(struct niu *np, int index, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5171) 	u64 val = nr64_ipp(IPP_CFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5173) 	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5174) 	nw64_ipp(IPP_DFIFO_WR_PTR, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5175) 	nw64_ipp(IPP_DFIFO_WR0, data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5176) 	nw64_ipp(IPP_DFIFO_WR1, data[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5177) 	nw64_ipp(IPP_DFIFO_WR2, data[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5178) 	nw64_ipp(IPP_DFIFO_WR3, data[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5179) 	nw64_ipp(IPP_DFIFO_WR4, data[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5180) 	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5183) static void niu_ipp_read(struct niu *np, int index, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5185) 	nw64_ipp(IPP_DFIFO_RD_PTR, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5186) 	data[0] = nr64_ipp(IPP_DFIFO_RD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5187) 	data[1] = nr64_ipp(IPP_DFIFO_RD1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5188) 	data[2] = nr64_ipp(IPP_DFIFO_RD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5189) 	data[3] = nr64_ipp(IPP_DFIFO_RD3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5190) 	data[4] = nr64_ipp(IPP_DFIFO_RD4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5193) static int niu_ipp_reset(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5195) 	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5196) 					  1000, 100, "IPP_CFIG");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5198) 
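/* Bring up the IPP block: zero-fill its DFIFO, read IPP_INT_STAT twice
 * (these status registers look clear-on-read), soft-reset the block,
 * read the discard/bad-checksum/ECC counters to clear them, program the
 * interrupt mask, and finally enable the IPP with DFIFO ECC checking,
 * bad-CRC dropping, checksumming and a 0x1ffff maximum packet size.
 */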
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5199) static int niu_init_ipp(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5201) 	u64 data[5], rbuf[5], val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5202) 	int i, max, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5204) 	if (np->parent->plat_type != PLAT_TYPE_NIU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5205) 		if (np->port == 0 || np->port == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5206) 			max = ATLAS_P0_P1_DFIFO_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5207) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5208) 			max = ATLAS_P2_P3_DFIFO_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5209) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5210) 		max = NIU_DFIFO_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5212) 	data[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5213) 	data[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5214) 	data[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5215) 	data[3] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5216) 	data[4] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5218) 	for (i = 0; i < max; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5219) 		niu_ipp_write(np, i, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5220) 		niu_ipp_read(np, i, rbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5223) 	(void) nr64_ipp(IPP_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5224) 	(void) nr64_ipp(IPP_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5226) 	err = niu_ipp_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5227) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5228) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5230) 	(void) nr64_ipp(IPP_PKT_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5231) 	(void) nr64_ipp(IPP_BAD_CS_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5232) 	(void) nr64_ipp(IPP_ECC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5234) 	(void) nr64_ipp(IPP_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5236) 	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5238) 	val = nr64_ipp(IPP_CFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5239) 	val &= ~IPP_CFIG_IP_MAX_PKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5240) 	val |= (IPP_CFIG_IPP_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5241) 		IPP_CFIG_DFIFO_ECC_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5242) 		IPP_CFIG_DROP_BAD_CRC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5243) 		IPP_CFIG_CKSUM_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5244) 		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5245) 	nw64_ipp(IPP_CFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5247) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5249) 
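/* Drive the link LED, but only on 10G fiber XMAC ports; for everything
 * else this is a no-op apart from rewriting XMAC_CONFIG with its
 * current value.  Link up selects the LED polarity bit, link down
 * forces the LED on, which presumably matches how the LED is wired on
 * those boards.
 */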
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5250) static void niu_handle_led(struct niu *np, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5252) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5253) 	val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5255) 	if ((np->flags & NIU_FLAGS_10G) != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5256) 	    (np->flags & NIU_FLAGS_FIBER) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5257) 		if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5258) 			val |= XMAC_CONFIG_LED_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5259) 			val &= ~XMAC_CONFIG_FORCE_LED_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5260) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5261) 			val |= XMAC_CONFIG_FORCE_LED_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5262) 			val &= ~XMAC_CONFIG_LED_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5263) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5266) 	nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5268) 
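/* XMAC transmit-interface setup.  Beyond enabling the TX output, this
 * selects MAC loopback, decides whether link-fault signalling is usable
 * (10G only), bypasses the 1G PCS for copper ports that have neither
 * fiber nor a SERDES transceiver, picks the 25MHz clock for 100Mbit,
 * and programs the XGMII/GMII/MII interface mode to match the active
 * speed.  ATCA_GE in MIF_CONFIG is set first when the transceiver is a
 * SERDES.
 */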
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5269) static void niu_init_xif_xmac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5271) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5272) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5274) 	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5275) 		val = nr64(MIF_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5276) 		val |= MIF_CONFIG_ATCA_GE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5277) 		nw64(MIF_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5280) 	val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5281) 	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5283) 	val |= XMAC_CONFIG_TX_OUTPUT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5285) 	if (lp->loopback_mode == LOOPBACK_MAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5286) 		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5287) 		val |= XMAC_CONFIG_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5288) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5289) 		val &= ~XMAC_CONFIG_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5290) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5292) 	if (np->flags & NIU_FLAGS_10G) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5293) 		val &= ~XMAC_CONFIG_LFS_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5294) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5295) 		val |= XMAC_CONFIG_LFS_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5296) 		if (!(np->flags & NIU_FLAGS_FIBER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5297) 		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5298) 			val |= XMAC_CONFIG_1G_PCS_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5299) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5300) 			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5303) 	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5305) 	if (lp->active_speed == SPEED_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5306) 		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5307) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5308) 		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5310) 	nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5312) 	val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5313) 	val &= ~XMAC_CONFIG_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5314) 	if (np->flags & NIU_FLAGS_10G) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5315) 		val |= XMAC_CONFIG_MODE_XGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5316) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5317) 		if (lp->active_speed == SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5318) 			val |= XMAC_CONFIG_MODE_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5319) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5320) 			val |= XMAC_CONFIG_MODE_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5323) 	nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5326) static void niu_init_xif_bmac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5328) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5329) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5331) 	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5333) 	if (lp->loopback_mode == LOOPBACK_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5334) 		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5335) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5336) 		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5338) 	if (lp->active_speed == SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5339) 		val |= BMAC_XIF_CONFIG_GMII_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5340) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5341) 		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5343) 	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5344) 		 BMAC_XIF_CONFIG_LED_POLARITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5346) 	if (!(np->flags & NIU_FLAGS_10G) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5347) 	    !(np->flags & NIU_FLAGS_FIBER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5348) 	    lp->active_speed == SPEED_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5349) 		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5350) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5351) 		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5353) 	nw64_mac(BMAC_XIF_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5356) static void niu_init_xif(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5358) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5359) 		niu_init_xif_xmac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5360) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5361) 		niu_init_xif_bmac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5364) static void niu_pcs_mii_reset(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5366) 	int limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5367) 	u64 val = nr64_pcs(PCS_MII_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5368) 	val |= PCS_MII_CTL_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5369) 	nw64_pcs(PCS_MII_CTL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5370) 	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5371) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5372) 		val = nr64_pcs(PCS_MII_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5373) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5376) static void niu_xpcs_reset(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5378) 	int limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5379) 	u64 val = nr64_xpcs(XPCS_CONTROL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5380) 	val |= XPCS_CONTROL1_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5381) 	nw64_xpcs(XPCS_CONTROL1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5382) 	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5383) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5384) 		val = nr64_xpcs(XPCS_CONTROL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5387) 
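/* Select and reset the PCS datapath that matches the link type: 1G
 * fiber and 1G SERDES use the MII PCS with the default datapath mode;
 * the 10G variants require the XMAC and go through the XPCS, including
 * the optional PHY loopback and clearing reads of the deskew and symbol
 * error counters; plain 1G copper and 1G RGMII fiber run the MII
 * datapath instead.  Unknown flag combinations are rejected.
 */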
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5388) static int niu_init_pcs(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5390) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5391) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5393) 	switch (np->flags & (NIU_FLAGS_10G |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5394) 			     NIU_FLAGS_FIBER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5395) 			     NIU_FLAGS_XCVR_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5396) 	case NIU_FLAGS_FIBER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5397) 		/* 1G fiber */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5398) 		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5399) 		nw64_pcs(PCS_DPATH_MODE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5400) 		niu_pcs_mii_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5401) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5403) 	case NIU_FLAGS_10G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5404) 	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5405) 	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5406) 		/* 10G SERDES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5407) 		if (!(np->flags & NIU_FLAGS_XMAC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5408) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5410) 		/* 10G copper or fiber */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5411) 		val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5412) 		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5413) 		nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5415) 		niu_xpcs_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5417) 		val = nr64_xpcs(XPCS_CONTROL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5418) 		if (lp->loopback_mode == LOOPBACK_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5419) 			val |= XPCS_CONTROL1_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5420) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5421) 			val &= ~XPCS_CONTROL1_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5422) 		nw64_xpcs(XPCS_CONTROL1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5424) 		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5425) 		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5426) 		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5427) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5430) 	case NIU_FLAGS_XCVR_SERDES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5431) 		/* 1G SERDES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5432) 		niu_pcs_mii_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5433) 		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5434) 		nw64_pcs(PCS_DPATH_MODE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5435) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5437) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5438) 		/* 1G copper */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5439) 	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5440) 		/* 1G RGMII fiber */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5441) 		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5442) 		niu_pcs_mii_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5443) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5445) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5446) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5449) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5452) static int niu_reset_tx_xmac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5454) 	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5455) 					  (XTXMAC_SW_RST_REG_RS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5456) 					   XTXMAC_SW_RST_SOFT_RST),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5457) 					  1000, 100, "XTXMAC_SW_RST");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5460) static int niu_reset_tx_bmac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5462) 	int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5464) 	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5465) 	limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5466) 	while (--limit >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5467) 		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5468) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5469) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5471) 	if (limit < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5472) 		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5473) 			np->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5474) 			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5475) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5476) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5478) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5481) static int niu_reset_tx_mac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5483) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5484) 		return niu_reset_tx_xmac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5485) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5486) 		return niu_reset_tx_bmac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5489) static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5491) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5493) 	val = nr64_mac(XMAC_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5494) 	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5495) 		 XMAC_MIN_RX_MIN_PKT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5496) 	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5497) 	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5498) 	nw64_mac(XMAC_MIN, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5500) 	nw64_mac(XMAC_MAX, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5502) 	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5504) 	val = nr64_mac(XMAC_IPG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5505) 	if (np->flags & NIU_FLAGS_10G) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5506) 		val &= ~XMAC_IPG_IPG_XGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5507) 		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5508) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5509) 		val &= ~XMAC_IPG_IPG_MII_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5510) 		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5511) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5512) 	nw64_mac(XMAC_IPG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5514) 	val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5515) 	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5516) 		 XMAC_CONFIG_STRETCH_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5517) 		 XMAC_CONFIG_VAR_MIN_IPG_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5518) 		 XMAC_CONFIG_TX_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5519) 	nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5521) 	nw64_mac(TXMAC_FRM_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5522) 	nw64_mac(TXMAC_BYTE_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5525) static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5527) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5529) 	nw64_mac(BMAC_MIN_FRAME, min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5530) 	nw64_mac(BMAC_MAX_FRAME, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5532) 	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5533) 	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5534) 	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5536) 	val = nr64_mac(BTXMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5537) 	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5538) 		 BTXMAC_CONFIG_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5539) 	nw64_mac(BTXMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5541) 
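/* Choose the TX MAC frame-size limits: the minimum is the classic
 * 64-byte Ethernet frame, and the maximum is either 1522 bytes
 * (1500-byte payload + 14-byte header + 4-byte FCS + 4-byte VLAN tag)
 * or 9216 for jumbo MTUs.  The BUG_ON enforces the XMAC_MIN
 * restriction described in the comment below: the TX minimum must be
 * a multiple of 8.
 */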
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5542) static void niu_init_tx_mac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5544) 	u64 min, max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5546) 	min = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5547) 	if (np->dev->mtu > ETH_DATA_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5548) 		max = 9216;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5549) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5550) 		max = 1522;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5552) 	/* The XMAC_MIN register only accepts values for TX min which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5553) 	 * have the low 3 bits cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5554) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5555) 	BUG_ON(min & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5557) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5558) 		niu_init_tx_xmac(np, min, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5559) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5560) 		niu_init_tx_bmac(np, min, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5563) static int niu_reset_rx_xmac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5565) 	int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5567) 	nw64_mac(XRXMAC_SW_RST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5568) 		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5569) 	limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5570) 	while (--limit >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5571) 		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5572) 						 XRXMAC_SW_RST_SOFT_RST)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5573) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5574) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5576) 	if (limit < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5577) 		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5578) 			np->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5579) 			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5580) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5583) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5586) static int niu_reset_rx_bmac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5588) 	int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5590) 	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5591) 	limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5592) 	while (--limit >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5593) 		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5594) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5595) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5597) 	if (limit < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5598) 		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5599) 			np->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5600) 			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5601) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5604) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5607) static int niu_reset_rx_mac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5609) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5610) 		return niu_reset_rx_xmac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5611) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5612) 		return niu_reset_rx_bmac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5614) 
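/* RX-side XMAC bring-up: clear the address filters and the multicast
 * hash table, point both the primary and the multicast MAC at this
 * port's first RDC table, then rewrite XMAC_CONFIG with the promiscuous
 * modes, error-check bypasses, CRC stripping and flow-control passing
 * all off and only hash filtering enabled (the receiver itself is left
 * disabled here).  The statistics counters are zeroed last so the link
 * starts with a clean slate.
 */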
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5615) static void niu_init_rx_xmac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5617) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5618) 	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5619) 	int first_rdc_table = tp->first_table_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5620) 	unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5621) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5623) 	nw64_mac(XMAC_ADD_FILT0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5624) 	nw64_mac(XMAC_ADD_FILT1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5625) 	nw64_mac(XMAC_ADD_FILT2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5626) 	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5627) 	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5628) 	for (i = 0; i < MAC_NUM_HASH; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5629) 		nw64_mac(XMAC_HASH_TBL(i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5630) 	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5631) 	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5632) 	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5634) 	val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5635) 	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5636) 		 XMAC_CONFIG_PROMISCUOUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5637) 		 XMAC_CONFIG_PROMISC_GROUP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5638) 		 XMAC_CONFIG_ERR_CHK_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5639) 		 XMAC_CONFIG_RX_CRC_CHK_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5640) 		 XMAC_CONFIG_RESERVED_MULTICAST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5641) 		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5642) 		 XMAC_CONFIG_ADDR_FILTER_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5643) 		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5644) 		 XMAC_CONFIG_STRIP_CRC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5645) 		 XMAC_CONFIG_PASS_FLOW_CTRL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5646) 		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5647) 	val |= (XMAC_CONFIG_HASH_FILTER_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5648) 	nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5650) 	nw64_mac(RXMAC_BT_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5651) 	nw64_mac(RXMAC_BC_FRM_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5652) 	nw64_mac(RXMAC_MC_FRM_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5653) 	nw64_mac(RXMAC_FRAG_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5654) 	nw64_mac(RXMAC_HIST_CNT1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5655) 	nw64_mac(RXMAC_HIST_CNT2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5656) 	nw64_mac(RXMAC_HIST_CNT3, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5657) 	nw64_mac(RXMAC_HIST_CNT4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5658) 	nw64_mac(RXMAC_HIST_CNT5, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5659) 	nw64_mac(RXMAC_HIST_CNT6, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5660) 	nw64_mac(RXMAC_HIST_CNT7, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5661) 	nw64_mac(RXMAC_MPSZER_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5662) 	nw64_mac(RXMAC_CRC_ER_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5663) 	nw64_mac(RXMAC_CD_VIO_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5664) 	nw64_mac(LINK_FAULT_CNT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5667) static void niu_init_rx_bmac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5669) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5670) 	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5671) 	int first_rdc_table = tp->first_table_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5672) 	unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5673) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5675) 	nw64_mac(BMAC_ADD_FILT0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5676) 	nw64_mac(BMAC_ADD_FILT1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5677) 	nw64_mac(BMAC_ADD_FILT2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5678) 	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5679) 	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5680) 	for (i = 0; i < MAC_NUM_HASH; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5681) 		nw64_mac(BMAC_HASH_TBL(i), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5682) 	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5683) 	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5684) 	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5686) 	val = nr64_mac(BRXMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5687) 	val &= ~(BRXMAC_CONFIG_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5688) 		 BRXMAC_CONFIG_STRIP_PAD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5689) 		 BRXMAC_CONFIG_STRIP_FCS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5690) 		 BRXMAC_CONFIG_PROMISC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5691) 		 BRXMAC_CONFIG_PROMISC_GRP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5692) 		 BRXMAC_CONFIG_ADDR_FILT_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5693) 		 BRXMAC_CONFIG_DISCARD_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5694) 	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5695) 	nw64_mac(BRXMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5697) 	val = nr64_mac(BMAC_ADDR_CMPEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5698) 	val |= BMAC_ADDR_CMPEN_EN0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5699) 	nw64_mac(BMAC_ADDR_CMPEN, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5702) static void niu_init_rx_mac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5704) 	niu_set_primary_mac(np, np->dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5706) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5707) 		niu_init_rx_xmac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5708) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5709) 		niu_init_rx_bmac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5712) static void niu_enable_tx_xmac(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5714) 	u64 val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5716) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5717) 		val |= XMAC_CONFIG_TX_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5718) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5719) 		val &= ~XMAC_CONFIG_TX_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5720) 	nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5723) static void niu_enable_tx_bmac(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5725) 	u64 val = nr64_mac(BTXMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5727) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5728) 		val |= BTXMAC_CONFIG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5729) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5730) 		val &= ~BTXMAC_CONFIG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5731) 	nw64_mac(BTXMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5734) static void niu_enable_tx_mac(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5736) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5737) 		niu_enable_tx_xmac(np, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5738) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5739) 		niu_enable_tx_bmac(np, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5742) static void niu_enable_rx_xmac(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5744) 	u64 val = nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5746) 	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5747) 		 XMAC_CONFIG_PROMISCUOUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5749) 	if (np->flags & NIU_FLAGS_MCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5750) 		val |= XMAC_CONFIG_HASH_FILTER_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5751) 	if (np->flags & NIU_FLAGS_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5752) 		val |= XMAC_CONFIG_PROMISCUOUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5754) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5755) 		val |= XMAC_CONFIG_RX_MAC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5756) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5757) 		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5758) 	nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5761) static void niu_enable_rx_bmac(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5763) 	u64 val = nr64_mac(BRXMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5765) 	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5766) 		 BRXMAC_CONFIG_PROMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5768) 	if (np->flags & NIU_FLAGS_MCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5769) 		val |= BRXMAC_CONFIG_HASH_FILT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5770) 	if (np->flags & NIU_FLAGS_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5771) 		val |= BRXMAC_CONFIG_PROMISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5773) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5774) 		val |= BRXMAC_CONFIG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5775) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5776) 		val &= ~BRXMAC_CONFIG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5777) 	nw64_mac(BRXMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5780) static void niu_enable_rx_mac(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5782) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5783) 		niu_enable_rx_xmac(np, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5784) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5785) 		niu_enable_rx_bmac(np, on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5788) static int niu_init_mac(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5790) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5792) 	niu_init_xif(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5793) 	err = niu_init_pcs(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5794) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5795) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5797) 	err = niu_reset_tx_mac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5798) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5799) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5800) 	niu_init_tx_mac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5801) 	err = niu_reset_rx_mac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5802) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5803) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5804) 	niu_init_rx_mac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5806) 	/* This looks hokey, but the RX MAC reset we just did will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5807) 	 * undo some of the state we set up in niu_init_tx_mac(), so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5808) 	 * have to call it again.  In particular, the RX MAC reset will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5809) 	 * set the XMAC_MAX register back to its default value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5810) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5811) 	niu_init_tx_mac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5812) 	niu_enable_tx_mac(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5814) 	niu_enable_rx_mac(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5816) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5819) static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5821) 	(void) niu_tx_channel_stop(np, rp->tx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5824) static void niu_stop_tx_channels(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5826) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5828) 	for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5829) 		struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5831) 		niu_stop_one_tx_channel(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5835) static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5837) 	(void) niu_tx_channel_reset(np, rp->tx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5840) static void niu_reset_tx_channels(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5842) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5844) 	for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5845) 		struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5847) 		niu_reset_one_tx_channel(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5851) static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5853) 	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5856) static void niu_stop_rx_channels(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5858) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5860) 	for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5861) 		struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5863) 		niu_stop_one_rx_channel(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5864) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5867) static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5869) 	int channel = rp->rx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5871) 	(void) niu_rx_channel_reset(np, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5872) 	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5873) 	nw64(RX_DMA_CTL_STAT(channel), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5874) 	(void) niu_enable_rx_channel(np, channel, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5877) static void niu_reset_rx_channels(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5879) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5881) 	for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5882) 		struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5884) 		niu_reset_one_rx_channel(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5888) static void niu_disable_ipp(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5890) 	u64 rd, wr, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5891) 	int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5893) 	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5894) 	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5895) 	limit = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5896) 	while (--limit >= 0 && (rd != wr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5897) 		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5898) 		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5899) 	}
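	/* rd == 0 / wr == 1 is apparently the DFIFO's post-reset state,
	 * so that pointer combination is not treated as a failure to
	 * quiesce.
	 */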
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5900) 	if (limit < 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5901) 	    (rd != 0 && wr != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5902) 		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5903) 			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5904) 			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5907) 	val = nr64_ipp(IPP_CFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5908) 	val &= ~(IPP_CFIG_IPP_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5909) 		 IPP_CFIG_DFIFO_ECC_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5910) 		 IPP_CFIG_DROP_BAD_CRC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5911) 		 IPP_CFIG_CKSUM_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5912) 	nw64_ipp(IPP_CFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5914) 	(void) niu_ipp_reset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5917) static int niu_init_hw(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5919) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5920) 
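	/* Bring the port up in dependency order: the TXC block first,
	 * then the TX and RX DMA channels, the classifier, the ZCP and
	 * IPP blocks, and finally the MAC itself.
	 */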
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5921) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5922) 	niu_txc_enable_port(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5923) 	niu_txc_port_dma_enable(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5924) 	niu_txc_set_imask(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5926) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5927) 	for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5928) 		struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5930) 		err = niu_init_one_tx_channel(np, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5931) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5932) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5935) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5936) 	err = niu_init_rx_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5937) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5938) 		goto out_uninit_tx_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5940) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5941) 	err = niu_init_classifier_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5942) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5943) 		goto out_uninit_rx_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5945) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5946) 	err = niu_init_zcp(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5947) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5948) 		goto out_uninit_rx_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5950) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5951) 	err = niu_init_ipp(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5952) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5953) 		goto out_uninit_rx_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5955) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5956) 	err = niu_init_mac(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5957) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5958) 		goto out_uninit_ipp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5960) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5962) out_uninit_ipp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5963) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5964) 	niu_disable_ipp(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5966) out_uninit_rx_channels:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5967) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5968) 	niu_stop_rx_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5969) 	niu_reset_rx_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5971) out_uninit_tx_channels:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5972) 	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5973) 	niu_stop_tx_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5974) 	niu_reset_tx_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5976) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5979) static void niu_stop_hw(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5981) 	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5982) 	niu_enable_interrupts(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5984) 	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5985) 	niu_enable_rx_mac(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5987) 	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5988) 	niu_disable_ipp(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5990) 	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5991) 	niu_stop_tx_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5993) 	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5994) 	niu_stop_rx_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5996) 	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5997) 	niu_reset_tx_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5999) 	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6000) 	niu_reset_rx_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6003) static void niu_set_irq_name(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6005) 	int port = np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6006) 	int i, j = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6007) 
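	/* Vector 0 always carries the MAC interrupt; on port 0 the next
	 * two vectors serve MIF and SYSERR.  The remaining LDG vectors
	 * are named after the RX rings first, then the TX rings.
	 */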
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6008) 	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6010) 	if (port == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6011) 		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6012) 		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6013) 		j = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6014) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6016) 	for (i = 0; i < np->num_ldg - j; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6017) 		if (i < np->num_rx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6018) 			sprintf(np->irq_name[i+j], "%s-rx-%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6019) 				np->dev->name, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6020) 		else if (i < np->num_tx_rings + np->num_rx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6021) 			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6022) 				i - np->num_rx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6026) static int niu_request_irq(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6028) 	int i, j, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6030) 	niu_set_irq_name(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6032) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6033) 	for (i = 0; i < np->num_ldg; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6034) 		struct niu_ldg *lp = &np->ldg[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6036) 		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6037) 				  np->irq_name[i], lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6038) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6039) 			goto out_free_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6043) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6045) out_free_irqs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6046) 	for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6047) 		struct niu_ldg *lp = &np->ldg[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6049) 		free_irq(lp->irq, lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6050) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6051) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6054) static void niu_free_irq(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6056) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6058) 	for (i = 0; i < np->num_ldg; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6059) 		struct niu_ldg *lp = &np->ldg[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6061) 		free_irq(lp->irq, lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6065) static void niu_enable_napi(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6067) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6069) 	for (i = 0; i < np->num_ldg; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6070) 		napi_enable(&np->ldg[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6073) static void niu_disable_napi(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6075) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6077) 	for (i = 0; i < np->num_ldg; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6078) 		napi_disable(&np->ldg[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6081) static int niu_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6083) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6084) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6086) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6088) 	err = niu_alloc_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6089) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6090) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6091) 
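	/* Keep the LDG interrupts masked until the hardware has been
	 * fully initialized; they are re-enabled under np->lock once
	 * niu_init_hw() has succeeded.
	 */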
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6092) 	err = niu_enable_interrupts(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6093) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6094) 		goto out_free_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6096) 	err = niu_request_irq(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6097) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6098) 		goto out_free_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6100) 	niu_enable_napi(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6102) 	spin_lock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6104) 	err = niu_init_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6105) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6106) 		timer_setup(&np->timer, niu_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6107) 		np->timer.expires = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6109) 		err = niu_enable_interrupts(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6110) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6111) 			niu_stop_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6114) 	spin_unlock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6116) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6117) 		niu_disable_napi(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6118) 		goto out_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6119) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6121) 	netif_tx_start_all_queues(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6123) 	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6124) 		netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6126) 	add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6128) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6130) out_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6131) 	niu_free_irq(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6133) out_free_channels:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6134) 	niu_free_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6136) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6137) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6140) static void niu_full_shutdown(struct niu *np, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6142) 	cancel_work_sync(&np->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6144) 	niu_disable_napi(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6145) 	netif_tx_stop_all_queues(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6147) 	del_timer_sync(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6149) 	spin_lock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6151) 	niu_stop_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6153) 	spin_unlock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6156) static int niu_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6158) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6160) 	niu_full_shutdown(np, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6162) 	niu_free_irq(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6164) 	niu_free_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6166) 	niu_handle_led(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6168) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6171) static void niu_sync_xmac_stats(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6173) 	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6175) 	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6176) 	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6178) 	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6179) 	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6180) 	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6181) 	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6182) 	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6183) 	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6184) 	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6185) 	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6186) 	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6187) 	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6188) 	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6189) 	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6190) 	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6191) 	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6192) 	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6193) 	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6196) static void niu_sync_bmac_stats(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6198) 	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6200) 	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6201) 	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6203) 	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6204) 	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
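	/* Note: this reads the alignment-error counter a second time; a
	 * dedicated BMAC CRC-error counter register was most likely
	 * intended here.
	 */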
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6205) 	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6206) 	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6209) static void niu_sync_mac_stats(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6211) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6212) 		niu_sync_xmac_stats(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6213) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6214) 		niu_sync_bmac_stats(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6217) static void niu_get_rx_stats(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6218) 			     struct rtnl_link_stats64 *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6220) 	u64 pkts, dropped, errors, bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6221) 	struct rx_ring_info *rx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6222) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6224) 	pkts = dropped = errors = bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6225) 
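	/* The rings can be freed out from under us by the close path, so
	 * snapshot the pointer once; if it is already NULL, just report
	 * the zeroed totals.
	 */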
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6226) 	rx_rings = READ_ONCE(np->rx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6227) 	if (!rx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6228) 		goto no_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6230) 	for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6231) 		struct rx_ring_info *rp = &rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6233) 		niu_sync_rx_discard_stats(np, rp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6235) 		pkts += rp->rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6236) 		bytes += rp->rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6237) 		dropped += rp->rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6238) 		errors += rp->rx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6239) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6241) no_rings:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6242) 	stats->rx_packets = pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6243) 	stats->rx_bytes = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6244) 	stats->rx_dropped = dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6245) 	stats->rx_errors = errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6248) static void niu_get_tx_stats(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6249) 			     struct rtnl_link_stats64 *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6251) 	u64 pkts, errors, bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6252) 	struct tx_ring_info *tx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6253) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6255) 	pkts = errors = bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6257) 	tx_rings = READ_ONCE(np->tx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6258) 	if (!tx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6259) 		goto no_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6261) 	for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6262) 		struct tx_ring_info *rp = &tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6264) 		pkts += rp->tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6265) 		bytes += rp->tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6266) 		errors += rp->tx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6269) no_rings:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6270) 	stats->tx_packets = pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6271) 	stats->tx_bytes = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6272) 	stats->tx_errors = errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6275) static void niu_get_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6276) 			  struct rtnl_link_stats64 *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6278) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6280) 	if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6281) 		niu_get_rx_stats(np, stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6282) 		niu_get_tx_stats(np, stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6286) static void niu_load_hash_xmac(struct niu *np, u16 *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6288) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6290) 	for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6291) 		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6294) static void niu_load_hash_bmac(struct niu *np, u16 *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6296) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6298) 	for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6299) 		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6302) static void niu_load_hash(struct niu *np, u16 *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6304) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6305) 		niu_load_hash_xmac(np, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6306) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6307) 		niu_load_hash_bmac(np, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6310) static void niu_set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6312) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6313) 	int i, alt_cnt, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6314) 	struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6315) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6316) 	u16 hash[16] = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6318) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6319) 	niu_enable_rx_mac(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6321) 	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6322) 	if (dev->flags & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6323) 		np->flags |= NIU_FLAGS_PROMISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6324) 	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6325) 		np->flags |= NIU_FLAGS_MCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6326) 
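	/* Each requested unicast address occupies one hardware alternate
	 * MAC slot; if there are more addresses than slots, give up on
	 * exact filtering and fall back to promiscuous mode.
	 */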
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6327) 	alt_cnt = netdev_uc_count(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6328) 	if (alt_cnt > niu_num_alt_addr(np)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6329) 		alt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6330) 		np->flags |= NIU_FLAGS_PROMISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6331) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6333) 	if (alt_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6334) 		int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6336) 		netdev_for_each_uc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6337) 			err = niu_set_alt_mac(np, index, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6338) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6339) 				netdev_warn(dev, "Error %d adding alt mac %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6340) 					    err, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6341) 			err = niu_enable_alt_mac(np, index, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6342) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6343) 				netdev_warn(dev, "Error %d enabling alt mac %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6344) 					    err, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6346) 			index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6347) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6348) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6349) 		int alt_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6350) 		if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6351) 			alt_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6352) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6353) 			alt_start = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6354) 		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6355) 			err = niu_enable_alt_mac(np, i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6356) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6357) 				netdev_warn(dev, "Error %d disabling alt mac %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6358) 					    err, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6359) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6361) 	if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6362) 		for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6363) 			hash[i] = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6364) 	} else if (!netdev_mc_empty(dev)) {
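		/* Hash on the top byte of the little-endian CRC of the
		 * address: the upper four bits select one of the 16 hash
		 * registers and the lower four bits select a bit (MSB
		 * first) within that 16-bit register.
		 */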
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6365) 		netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6366) 			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6368) 			crc >>= 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6369) 			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6370) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6373) 	if (np->flags & NIU_FLAGS_MCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6374) 		niu_load_hash(np, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6376) 	niu_enable_rx_mac(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6377) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6380) static int niu_set_mac_addr(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6382) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6383) 	struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6384) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6386) 	if (!is_valid_ether_addr(addr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6387) 		return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6389) 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6391) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6392) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6394) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6395) 	niu_enable_rx_mac(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6396) 	niu_set_primary_mac(np, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6397) 	niu_enable_rx_mac(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6398) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6400) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6403) static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6405) 	return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6408) static void niu_netif_stop(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6410) 	netif_trans_update(np->dev);	/* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6412) 	niu_disable_napi(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6414) 	netif_tx_disable(np->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6417) static void niu_netif_start(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6419) 	/* NOTE: unconditional netif_wake_queue is only appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6420) 	 * so long as all callers are assured to have free tx slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6421) 	 * (such as after niu_init_hw).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6422) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6423) 	netif_tx_wake_all_queues(np->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6425) 	niu_enable_napi(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6427) 	niu_enable_interrupts(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6429) 
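/* Rebuild ring state after the hardware has been stopped.  RX pages
 * still linked into each ring's rxhash are re-posted to the RBR (their
 * DMA addresses are stashed in page->index), the rest of the ring is
 * topped up with fresh pages, and the RBR/RCR indices are reset.  TX
 * buffers still holding packets are released and the TX rings emptied.
 */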
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6430) static void niu_reset_buffers(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6432) 	int i, j, k, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6434) 	if (np->rx_rings) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6435) 		for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6436) 			struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6438) 			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6439) 				struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6441) 				page = rp->rxhash[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6442) 				while (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6443) 					struct page *next =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6444) 						(struct page *) page->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6445) 					u64 base = page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6446) 					base = base >> RBR_DESCR_ADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6447) 					rp->rbr[k++] = cpu_to_le32(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6448) 					page = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6449) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6450) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6451) 			for (; k < MAX_RBR_RING_SIZE; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6452) 				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6453) 				if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6454) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6455) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6457) 			rp->rbr_index = rp->rbr_table_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6458) 			rp->rcr_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6459) 			rp->rbr_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6460) 			rp->rbr_refill_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6461) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6463) 	if (np->tx_rings) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6464) 		for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6465) 			struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6467) 			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6468) 				if (rp->tx_buffs[j].skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6469) 					(void) release_tx_packet(np, rp, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6470) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6472) 			rp->pending = MAX_TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6473) 			rp->prod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6474) 			rp->cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6475) 			rp->wrap_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6476) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6479) 
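/* Workqueue handler behind niu_tx_timeout().  Runs in process context
 * so it may sleep: kill the timer, stop traffic and the hardware,
 * rebuild the rings with niu_reset_buffers(), then re-init the hardware
 * and restart everything on success.
 */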
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6480) static void niu_reset_task(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6482) 	struct niu *np = container_of(work, struct niu, reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6483) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6484) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6486) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6487) 	if (!netif_running(np->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6488) 		spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6489) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6492) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6494) 	del_timer_sync(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6496) 	niu_netif_stop(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6498) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6500) 	niu_stop_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6502) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6504) 	niu_reset_buffers(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6506) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6508) 	err = niu_init_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6509) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6510) 		np->timer.expires = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6511) 		add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6512) 		niu_netif_start(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6515) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6517) 
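/* ndo_tx_timeout: recovery needs to sleep, so just log and defer the
 * actual reset to the reset_task work item.
 */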
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6518) static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6520) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6522) 	dev_err(np->device, "%s: Transmit timed out, resetting\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6523) 		dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6525) 	schedule_work(&np->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6527) 
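/* Pack one little-endian 64-bit TX descriptor: SOP/MARK bits, the
 * number of descriptors making up the packet, the transfer length, and
 * the buffer's DMA address masked with TX_DESC_SAD.
 */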
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6528) static void niu_set_txd(struct tx_ring_info *rp, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6529) 			u64 mapping, u64 len, u64 mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6530) 			u64 n_frags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6532) 	__le64 *desc = &rp->descr[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6534) 	*desc = cpu_to_le64(mark |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6535) 			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6536) 			    (len << TX_DESC_TR_LEN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6537) 			    (mapping & TX_DESC_SAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6539) 
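/* Build the flags word of the software tx_pkt_hdr prepended to every
 * frame.  All offsets (pad, L3 start, L4 start/stuff) are encoded in
 * 16-bit units, hence the divisions by two.  VLAN frames are flagged
 * and classified by the encapsulated protocol, and IPv6 uses its fixed
 * 40-byte header length (ihl = 40 >> 2).
 */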
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6540) static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6541) 				u64 pad_bytes, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6543) 	u16 eth_proto, eth_proto_inner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6544) 	u64 csum_bits, l3off, ihl, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6545) 	u8 ip_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6546) 	int ipv6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6548) 	eth_proto = be16_to_cpu(ehdr->h_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6549) 	eth_proto_inner = eth_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6550) 	if (eth_proto == ETH_P_8021Q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6551) 		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6552) 		__be16 val = vp->h_vlan_encapsulated_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6554) 		eth_proto_inner = be16_to_cpu(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6557) 	ipv6 = ihl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6558) 	switch (skb->protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6559) 	case cpu_to_be16(ETH_P_IP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6560) 		ip_proto = ip_hdr(skb)->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6561) 		ihl = ip_hdr(skb)->ihl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6562) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6563) 	case cpu_to_be16(ETH_P_IPV6):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6564) 		ip_proto = ipv6_hdr(skb)->nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6565) 		ihl = (40 >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6566) 		ipv6 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6567) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6568) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6569) 		ip_proto = ihl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6570) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6571) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6573) 	csum_bits = TXHDR_CSUM_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6574) 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6575) 		u64 start, stuff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6577) 		csum_bits = (ip_proto == IPPROTO_TCP ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6578) 			     TXHDR_CSUM_TCP :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6579) 			     (ip_proto == IPPROTO_UDP ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6580) 			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6582) 		start = skb_checksum_start_offset(skb) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6583) 			(pad_bytes + sizeof(struct tx_pkt_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6584) 		stuff = start + skb->csum_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6586) 		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6587) 		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6590) 	l3off = skb_network_offset(skb) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6591) 		(pad_bytes + sizeof(struct tx_pkt_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6593) 	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6594) 	       (len << TXHDR_LEN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6595) 	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6596) 	       (ihl << TXHDR_IHL_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6597) 	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6598) 	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6599) 	       (ipv6 ? TXHDR_IP_VER : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6600) 	       csum_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6602) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6604) 
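/* Main transmit path.  Every frame is prefixed with a software
 * tx_pkt_hdr, so sizeof(struct tx_pkt_hdr) plus up to 15 bytes of
 * alignment pad of headroom is required (the skb is reallocated if it
 * has less).  The linear head is mapped and split into chunks of at
 * most MAX_TX_DESC_LEN, each page fragment gets its own descriptor,
 * and the TX_RING_KICK doorbell is written with the wrap bit and the
 * new producer index.
 */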
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6605) static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6606) 				  struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6608) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6609) 	unsigned long align, headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6610) 	struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6611) 	struct tx_ring_info *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6612) 	struct tx_pkt_hdr *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6613) 	unsigned int len, nfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6614) 	struct ethhdr *ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6615) 	int prod, i, tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6616) 	u64 mapping, mrk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6618) 	i = skb_get_queue_mapping(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6619) 	rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6620) 	txq = netdev_get_tx_queue(dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6622) 	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6623) 		netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6624) 		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6625) 		rp->tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6626) 		return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6629) 	if (eth_skb_pad(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6630) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6632) 	len = sizeof(struct tx_pkt_hdr) + 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6633) 	if (skb_headroom(skb) < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6634) 		struct sk_buff *skb_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6636) 		skb_new = skb_realloc_headroom(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6637) 		if (!skb_new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6638) 			goto out_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6639) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6640) 		skb = skb_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6641) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6642) 		skb_orphan(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6644) 	align = ((unsigned long) skb->data & (16 - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6645) 	headroom = align + sizeof(struct tx_pkt_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6647) 	ehdr = (struct ethhdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6648) 	tp = skb_push(skb, headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6650) 	len = skb->len - sizeof(struct tx_pkt_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6651) 	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6652) 	tp->resv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6654) 	len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6655) 	mapping = np->ops->map_single(np->device, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6656) 				      len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6658) 	prod = rp->prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6660) 	rp->tx_buffs[prod].skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6661) 	rp->tx_buffs[prod].mapping = mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6663) 	mrk = TX_DESC_SOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6664) 	if (++rp->mark_counter == rp->mark_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6665) 		rp->mark_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6666) 		mrk |= TX_DESC_MARK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6667) 		rp->mark_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6670) 	tlen = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6671) 	nfg = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6672) 	while (tlen > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6673) 		tlen -= MAX_TX_DESC_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6674) 		nfg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6677) 	while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6678) 		unsigned int this_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6680) 		if (this_len > MAX_TX_DESC_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6681) 			this_len = MAX_TX_DESC_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6683) 		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6684) 		mrk = nfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6686) 		prod = NEXT_TX(rp, prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6687) 		mapping += this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6688) 		len -= this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6689) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6691) 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6692) 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6694) 		len = skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6695) 		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6696) 					    skb_frag_off(frag), len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6697) 					    DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6699) 		rp->tx_buffs[prod].skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6700) 		rp->tx_buffs[prod].mapping = mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6702) 		niu_set_txd(rp, prod, mapping, len, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6704) 		prod = NEXT_TX(rp, prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6707) 	if (prod < rp->prod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6708) 		rp->wrap_bit ^= TX_RING_KICK_WRAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6709) 	rp->prod = prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6711) 	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6713) 	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6714) 		netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6715) 		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6716) 			netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6719) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6720) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6722) out_drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6723) 	rp->tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6724) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6725) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6727) 
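/* A new MTU only requires hardware reconfiguration when it crosses the
 * standard/jumbo boundary (ETH_DATA_LEN); otherwise the value is just
 * recorded.  For a jumbo transition the channels are torn down and
 * rebuilt and the hardware re-initialised; if reallocation fails the
 * device is left shut down.
 */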
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6728) static int niu_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6730) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6731) 	int err, orig_jumbo, new_jumbo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6733) 	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6734) 	new_jumbo = (new_mtu > ETH_DATA_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6736) 	dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6738) 	if (!netif_running(dev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6739) 	    (orig_jumbo == new_jumbo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6740) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6742) 	niu_full_shutdown(np, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6744) 	niu_free_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6746) 	niu_enable_napi(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6748) 	err = niu_alloc_channels(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6749) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6750) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6752) 	spin_lock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6754) 	err = niu_init_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6755) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6756) 		timer_setup(&np->timer, niu_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6757) 		np->timer.expires = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6759) 		err = niu_enable_interrupts(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6760) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6761) 			niu_stop_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6762) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6764) 	spin_unlock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6766) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6767) 		netif_tx_start_all_queues(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6768) 		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6769) 			netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6771) 		add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6774) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6776) 
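/* ethtool -i: driver name/version plus the Fcode revision from the VPD
 * as the firmware version.  bus_info is only filled in when the device
 * sits on PCI (plat_type != PLAT_TYPE_NIU).
 */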
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6777) static void niu_get_drvinfo(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6778) 			    struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6780) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6781) 	struct niu_vpd *vpd = &np->vpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6783) 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6784) 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6785) 	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6786) 		vpd->fcode_major, vpd->fcode_minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6787) 	if (np->parent->plat_type != PLAT_TYPE_NIU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6788) 		strlcpy(info->bus_info, pci_name(np->pdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6789) 			sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6792) static int niu_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6793) 				  struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6795) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6796) 	struct niu_link_config *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6798) 	lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6800) 	memset(cmd, 0, sizeof(*cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6801) 	cmd->base.phy_address = np->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6802) 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6803) 						lp->supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6804) 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6805) 						lp->active_advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6806) 	cmd->base.autoneg = lp->active_autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6807) 	cmd->base.speed = lp->active_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6808) 	cmd->base.duplex = lp->active_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6809) 	cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6811) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6814) static int niu_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6815) 				  const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6817) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6818) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6820) 	ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6821) 						cmd->link_modes.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6822) 	lp->speed = cmd->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6823) 	lp->duplex = cmd->base.duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6824) 	lp->autoneg = cmd->base.autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6825) 	return niu_init_link(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6828) static u32 niu_get_msglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6830) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6831) 	return np->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6834) static void niu_set_msglevel(struct net_device *dev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6836) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6837) 	np->msg_enable = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6840) static int niu_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6842) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6844) 	if (np->link_config.autoneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6845) 		return niu_init_link(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6847) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6850) static int niu_get_eeprom_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6852) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6854) 	return np->eeprom_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6856) 
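/* ethtool EEPROM read.  The ESPC_NCR window returns whole 32-bit words,
 * so a misaligned start and a short tail are bounced through a local
 * word; the requested range is clamped to the EEPROM size.
 */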
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6857) static int niu_get_eeprom(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6858) 			  struct ethtool_eeprom *eeprom, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6860) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6861) 	u32 offset, len, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6863) 	offset = eeprom->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6864) 	len = eeprom->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6866) 	if (offset + len < offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6867) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6868) 	if (offset >= np->eeprom_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6869) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6870) 	if (offset + len > np->eeprom_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6871) 		len = eeprom->len = np->eeprom_len - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6873) 	if (offset & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6874) 		u32 b_offset, b_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6876) 		b_offset = offset & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6877) 		b_count = 4 - b_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6878) 		if (b_count > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6879) 			b_count = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6881) 		val = nr64(ESPC_NCR((offset - b_offset) / 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6882) 		memcpy(data, ((char *)&val) + b_offset, b_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6883) 		data += b_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6884) 		len -= b_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6885) 		offset += b_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6887) 	while (len >= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6888) 		val = nr64(ESPC_NCR(offset / 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6889) 		memcpy(data, &val, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6890) 		data += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6891) 		len -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6892) 		offset += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6894) 	if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6895) 		val = nr64(ESPC_NCR(offset / 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6896) 		memcpy(data, &val, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6897) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6898) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6900) 
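/* Map an ethtool flow type to the IP protocol number it implies
 * (0 when there is none).
 */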
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6901) static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6903) 	switch (flow_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6904) 	case TCP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6905) 	case TCP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6906) 		*pid = IPPROTO_TCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6907) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6908) 	case UDP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6909) 	case UDP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6910) 		*pid = IPPROTO_UDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6911) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6912) 	case SCTP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6913) 	case SCTP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6914) 		*pid = IPPROTO_SCTP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6915) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6916) 	case AH_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6917) 	case AH_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6918) 		*pid = IPPROTO_AH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6919) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6920) 	case ESP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6921) 	case ESP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6922) 		*pid = IPPROTO_ESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6923) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6924) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6925) 		*pid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6926) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6929) 
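/* Map a NIU class code back to an ethtool flow type; all four
 * user-programmable classes are reported as IP_USER_FLOW.
 */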
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6930) static int niu_class_to_ethflow(u64 class, int *flow_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6932) 	switch (class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6933) 	case CLASS_CODE_TCP_IPV4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6934) 		*flow_type = TCP_V4_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6935) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6936) 	case CLASS_CODE_UDP_IPV4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6937) 		*flow_type = UDP_V4_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6938) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6939) 	case CLASS_CODE_AH_ESP_IPV4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6940) 		*flow_type = AH_V4_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6941) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6942) 	case CLASS_CODE_SCTP_IPV4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6943) 		*flow_type = SCTP_V4_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6944) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6945) 	case CLASS_CODE_TCP_IPV6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6946) 		*flow_type = TCP_V6_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6947) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6948) 	case CLASS_CODE_UDP_IPV6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6949) 		*flow_type = UDP_V6_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6950) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6951) 	case CLASS_CODE_AH_ESP_IPV6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6952) 		*flow_type = AH_V6_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6953) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6954) 	case CLASS_CODE_SCTP_IPV6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6955) 		*flow_type = SCTP_V6_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6956) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6957) 	case CLASS_CODE_USER_PROG1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6958) 	case CLASS_CODE_USER_PROG2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6959) 	case CLASS_CODE_USER_PROG3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6960) 	case CLASS_CODE_USER_PROG4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6961) 		*flow_type = IP_USER_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6962) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6963) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6964) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6965) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6967) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6969) 
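/* Inverse of niu_class_to_ethflow(); returns 1 on success and 0 for
 * flow types the hardware cannot classify.  AH and ESP share a single
 * class code per IP version, so that mapping is lossy.
 */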
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6970) static int niu_ethflow_to_class(int flow_type, u64 *class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6972) 	switch (flow_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6973) 	case TCP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6974) 		*class = CLASS_CODE_TCP_IPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6975) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6976) 	case UDP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6977) 		*class = CLASS_CODE_UDP_IPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6978) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6979) 	case AH_ESP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6980) 	case AH_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6981) 	case ESP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6982) 		*class = CLASS_CODE_AH_ESP_IPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6983) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6984) 	case SCTP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6985) 		*class = CLASS_CODE_SCTP_IPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6986) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6987) 	case TCP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6988) 		*class = CLASS_CODE_TCP_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6989) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6990) 	case UDP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6991) 		*class = CLASS_CODE_UDP_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6992) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6993) 	case AH_ESP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6994) 	case AH_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6995) 	case ESP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6996) 		*class = CLASS_CODE_AH_ESP_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6997) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6998) 	case SCTP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6999) 		*class = CLASS_CODE_SCTP_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7000) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7001) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7002) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7005) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7007) 
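/* Translate a hardware FLOW_KEY register value into ethtool RXH_*
 * hash-field bits.
 */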
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7008) static u64 niu_flowkey_to_ethflow(u64 flow_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7010) 	u64 ethflow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7012) 	if (flow_key & FLOW_KEY_L2DA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7013) 		ethflow |= RXH_L2DA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7014) 	if (flow_key & FLOW_KEY_VLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7015) 		ethflow |= RXH_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7016) 	if (flow_key & FLOW_KEY_IPSA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7017) 		ethflow |= RXH_IP_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7018) 	if (flow_key & FLOW_KEY_IPDA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7019) 		ethflow |= RXH_IP_DST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7020) 	if (flow_key & FLOW_KEY_PROTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7021) 		ethflow |= RXH_L3_PROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7022) 	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7023) 		ethflow |= RXH_L4_B_0_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7024) 	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7025) 		ethflow |= RXH_L4_B_2_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7027) 	return ethflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7030) 
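/* Translate ethtool RXH_* hash-field bits into a FLOW_KEY register
 * value; bits with no hardware equivalent are silently ignored and the
 * function always returns 1.
 */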
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7031) static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7033) 	u64 key = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7035) 	if (ethflow & RXH_L2DA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7036) 		key |= FLOW_KEY_L2DA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7037) 	if (ethflow & RXH_VLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7038) 		key |= FLOW_KEY_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7039) 	if (ethflow & RXH_IP_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7040) 		key |= FLOW_KEY_IPSA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7041) 	if (ethflow & RXH_IP_DST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7042) 		key |= FLOW_KEY_IPDA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7043) 	if (ethflow & RXH_L3_PROTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7044) 		key |= FLOW_KEY_PROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7045) 	if (ethflow & RXH_L4_B_0_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7046) 		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7047) 	if (ethflow & RXH_L4_B_2_3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7048) 		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7050) 	*flow_key = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7052) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7055) 
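/* ETHTOOL_GRXFH: report the fields hashed for one flow type, or
 * RXH_DISCARD when the TCAM key for that class is set to discard.
 */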
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7056) static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7058) 	u64 class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7060) 	nfc->data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7062) 	if (!niu_ethflow_to_class(nfc->flow_type, &class))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7063) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7065) 	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7066) 	    TCAM_KEY_DISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7067) 		nfc->data = RXH_DISCARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7068) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7069) 		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7070) 						      CLASS_CODE_USER_PROG1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7071) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7073) 
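/* Decode an IPv4 TCAM entry back into an ethtool flow spec.  key[3]
 * carries the source/destination addresses, key[2] packs TOS, protocol
 * and the port-pair (or SPI) field; key_mask[] is decoded the same way
 * for the masks.
 */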
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7074) static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7075) 					struct ethtool_rx_flow_spec *fsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7077) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7078) 	u16 prt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7080) 	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7081) 	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7083) 	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7084) 	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7086) 	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7087) 	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7089) 	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7090) 	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7092) 	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7093) 		TCAM_V4KEY2_TOS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7094) 	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7095) 		TCAM_V4KEY2_TOS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7097) 	switch (fsp->flow_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7098) 	case TCP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7099) 	case UDP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7100) 	case SCTP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7101) 		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7102) 			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7103) 		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7105) 		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7106) 			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7107) 		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7109) 		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7110) 			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7111) 		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7113) 		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7114) 			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7115) 		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7116) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7117) 	case AH_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7118) 	case ESP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7119) 		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7120) 			TCAM_V4KEY2_PORT_SPI_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7121) 		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7123) 		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7124) 			TCAM_V4KEY2_PORT_SPI_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7125) 		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7126) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7127) 	case IP_USER_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7128) 		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7129) 			TCAM_V4KEY2_PORT_SPI_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7130) 		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7132) 		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7133) 			TCAM_V4KEY2_PORT_SPI_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7134) 		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7136) 		fsp->h_u.usr_ip4_spec.proto =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7137) 			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7138) 			TCAM_V4KEY2_PROTO_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7139) 		fsp->m_u.usr_ip4_spec.proto =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7140) 			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7141) 			TCAM_V4KEY2_PROTO_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7143) 		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7144) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7145) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7146) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7149) 
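/* ETHTOOL_GRXCLSRULE: fetch one classification rule.  The user-visible
 * location is translated to a TCAM index, the entry's class code is
 * mapped back to an ethtool flow type (AH entries whose protocol field
 * says ESP are reported as ESP), and the match fields and ring cookie
 * (RX_CLS_FLOW_DISC for discard entries) are filled in.
 */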
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7150) static int niu_get_ethtool_tcam_entry(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7151) 				      struct ethtool_rxnfc *nfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7153) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7154) 	struct niu_tcam_entry *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7155) 	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7156) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7157) 	u64 class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7158) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7160) 	idx = tcam_get_index(np, (u16)nfc->fs.location);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7162) 	tp = &parent->tcam[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7163) 	if (!tp->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7164) 		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7165) 			    parent->index, (u16)nfc->fs.location, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7166) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7169) 	/* fill the flow spec entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7170) 	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7171) 		TCAM_V4KEY0_CLASS_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7172) 	ret = niu_class_to_ethflow(class, &fsp->flow_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7173) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7174) 		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7175) 			    parent->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7176) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7179) 	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7180) 		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7181) 			TCAM_V4KEY2_PROTO_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7182) 		if (proto == IPPROTO_ESP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7183) 			if (fsp->flow_type == AH_V4_FLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7184) 				fsp->flow_type = ESP_V4_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7185) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7186) 				fsp->flow_type = ESP_V6_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7187) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7188) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7190) 	switch (fsp->flow_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7191) 	case TCP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7192) 	case UDP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7193) 	case SCTP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7194) 	case AH_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7195) 	case ESP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7196) 		niu_get_ip4fs_from_tcam_key(tp, fsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7197) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7198) 	case TCP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7199) 	case UDP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7200) 	case SCTP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7201) 	case AH_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7202) 	case ESP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7203) 		/* Not yet implemented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7204) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7205) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7206) 	case IP_USER_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7207) 		niu_get_ip4fs_from_tcam_key(tp, fsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7208) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7209) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7210) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7211) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7214) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7215) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7217) 	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7218) 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7219) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7220) 		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7221) 			TCAM_ASSOCDATA_OFFSET_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7223) 	/* report the total tcam size via ->data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7224) 	nfc->data = tcam_get_size(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7225) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7226) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7228) 
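/* ETHTOOL_GRXCLSRLALL: scan the TCAM under the parent lock and return
 * the locations of all valid entries; -EMSGSIZE if the caller's
 * rule_locs array cannot hold them all.
 */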
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7229) static int niu_get_ethtool_tcam_all(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7230) 				    struct ethtool_rxnfc *nfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7231) 				    u32 *rule_locs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7233) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7234) 	struct niu_tcam_entry *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7235) 	int i, idx, cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7236) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7237) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7239) 	/* put the tcam size here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7240) 	nfc->data = tcam_get_size(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7242) 	niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7243) 	for (cnt = 0, i = 0; i < nfc->data; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7244) 		idx = tcam_get_index(np, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7245) 		tp = &parent->tcam[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7246) 		if (!tp->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7247) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7248) 		if (cnt == nfc->rule_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7249) 			ret = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7250) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7251) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7252) 		rule_locs[cnt] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7253) 		cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7255) 	niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7257) 	nfc->rule_cnt = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7259) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7260) }
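
The two handlers above follow ethtool's usual two-step retrieval pattern: userspace first asks how many rules exist (ETHTOOL_GRXCLSRLCNT), then fetches their locations into an array it sized from that answer; the -EMSGSIZE return covers rules added in between. A minimal userspace sketch of that pattern, assuming the standard SIOCETHTOOL ioctl path; the interface name, helper name and error handling are illustrative, not part of the driver:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Illustrative only: print the locations of all classification rules */
	static int dump_rule_locations(const char *ifname)
	{
		struct ethtool_rxnfc cnt = { .cmd = ETHTOOL_GRXCLSRLCNT };
		struct ethtool_rxnfc *all;
		struct ifreq ifr;
		__u32 i;
		int fd, err = -1;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

		/* first call: the ETHTOOL_GRXCLSRLCNT case fills in rule_cnt */
		ifr.ifr_data = (void *)&cnt;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			goto out;

		/* rule_locs[] is a variable-length tail on struct ethtool_rxnfc */
		all = calloc(1, sizeof(*all) + cnt.rule_cnt * sizeof(__u32));
		if (!all)
			goto out;
		all->cmd = ETHTOOL_GRXCLSRLALL;
		all->rule_cnt = cnt.rule_cnt;

		/* second call lands in niu_get_ethtool_tcam_all() */
		ifr.ifr_data = (void *)all;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
			for (i = 0; i < all->rule_cnt; i++)
				printf("rule at location %u\n", all->rule_locs[i]);
			err = 0;
		}
		free(all);
	out:
		close(fd);
		return err;
	}

This is roughly what "ethtool -n eth0" does before printing each rule in detail via ETHTOOL_GRXCLSRULE.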
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7262) static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7263) 		       u32 *rule_locs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7265) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7266) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7268) 	switch (cmd->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7269) 	case ETHTOOL_GRXFH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7270) 		ret = niu_get_hash_opts(np, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7271) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7272) 	case ETHTOOL_GRXRINGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7273) 		cmd->data = np->num_rx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7274) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7275) 	case ETHTOOL_GRXCLSRLCNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7276) 		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7277) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7278) 	case ETHTOOL_GRXCLSRULE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7279) 		ret = niu_get_ethtool_tcam_entry(np, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7280) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7281) 	case ETHTOOL_GRXCLSRLALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7282) 		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7283) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7284) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7285) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7286) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7287) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7289) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7292) static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7294) 	u64 class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7295) 	u64 flow_key = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7296) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7298) 	if (!niu_ethflow_to_class(nfc->flow_type, &class))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7299) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7301) 	if (class < CLASS_CODE_USER_PROG1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7302) 	    class > CLASS_CODE_SCTP_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7303) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7305) 	if (nfc->data & RXH_DISCARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7306) 		niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7307) 		flow_key = np->parent->tcam_key[class -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7308) 					       CLASS_CODE_USER_PROG1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7309) 		flow_key |= TCAM_KEY_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7310) 		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7311) 		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7312) 		niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7313) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7314) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7315) 		/* Discard was set before, but is not set now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7316) 		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7317) 		    TCAM_KEY_DISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7318) 			niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7319) 			flow_key = np->parent->tcam_key[class -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7320) 					       CLASS_CODE_USER_PROG1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7321) 			flow_key &= ~TCAM_KEY_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7322) 			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7323) 			     flow_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7324) 			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7325) 				flow_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7326) 			niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7327) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7328) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7330) 	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7331) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7333) 	niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7334) 	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7335) 	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7336) 	niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7338) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7339) }
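
niu_set_hash_opts() receives ethtool's RXH_* field-selection bits in nfc->data: RXH_DISCARD toggles the per-class TCAM_KEY_DISC bit handled above, and any other combination is translated by niu_ethflow_to_flowkey() into the FLOW_KEY register value that drives flow hashing. A hedged sketch of the userspace request only (socket/ifreq plumbing as in the earlier example):

	/* Illustrative only: hash TCP/IPv4 flows over the full 4-tuple */
	struct ethtool_rxnfc nfc = {
		.cmd       = ETHTOOL_SRXFH,
		.flow_type = TCP_V4_FLOW,
		.data      = RXH_IP_SRC | RXH_IP_DST |	/* IP addresses */
			     RXH_L4_B_0_1 | RXH_L4_B_2_3,	/* L4 ports */
	};
	/* .data = RXH_DISCARD would instead discard the whole class */

The equivalent CLI form is "ethtool -N eth0 rx-flow-hash tcp4 sdfn".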
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7341) static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7342) 				       struct niu_tcam_entry *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7343) 				       int l2_rdc_tab, u64 class)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7345) 	u8 pid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7346) 	u32 sip, dip, sipm, dipm, spi, spim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7347) 	u16 sport, dport, spm, dpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7349) 	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7350) 	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7351) 	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7352) 	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7354) 	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7355) 	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7356) 	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7357) 	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7359) 	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7360) 	tp->key[3] |= dip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7362) 	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7363) 	tp->key_mask[3] |= dipm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7365) 	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7366) 		       TCAM_V4KEY2_TOS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7367) 	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7368) 			    TCAM_V4KEY2_TOS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7369) 	switch (fsp->flow_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7370) 	case TCP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7371) 	case UDP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7372) 	case SCTP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7373) 		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7374) 		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7375) 		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7376) 		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7378) 		tp->key[2] |= (((u64)sport << 16) | dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7379) 		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7380) 		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7381) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7382) 	case AH_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7383) 	case ESP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7384) 		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7385) 		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7387) 		tp->key[2] |= spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7388) 		tp->key_mask[2] |= spim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7389) 		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7390) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7391) 	case IP_USER_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7392) 		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7393) 		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7395) 		tp->key[2] |= spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7396) 		tp->key_mask[2] |= spim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7397) 		pid = fsp->h_u.usr_ip4_spec.proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7398) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7399) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7400) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7403) 	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7404) 	if (pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7405) 		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7407) }
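
A worked example of the packing above (assuming TCAM_V4KEY3_SADDR_SHIFT is 32, as the (u64) casts suggest; the actual value lives in niu.h): a rule matching source network 192.168.1.0/24 with any destination arrives as sip = 0xc0a80100, sipm = 0xffffff00, dip = dipm = 0, giving key[3] = 0xc0a8010000000000 and key_mask[3] = 0xffffff0000000000. Clear mask bits are "don't care" positions in the TCAM compare, which is also why an unset protocol (pid == 0) leaves TCAM_V4KEY2_PROTO unmasked.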
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7409) static int niu_add_ethtool_tcam_entry(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7410) 				      struct ethtool_rxnfc *nfc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7412) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7413) 	struct niu_tcam_entry *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7414) 	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7415) 	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7416) 	int l2_rdc_table = rdc_table->first_table_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7417) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7418) 	u64 class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7419) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7420) 	int err, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7422) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7424) 	idx = nfc->fs.location;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7425) 	if (idx >= tcam_get_size(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7426) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7428) 	if (fsp->flow_type == IP_USER_FLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7429) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7430) 		int add_usr_cls = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7431) 		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7432) 		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7434) 		if (uspec->ip_ver != ETH_RX_NFC_IP4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7435) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7437) 		niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7439) 		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7440) 			if (parent->l3_cls[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7441) 				if (uspec->proto == parent->l3_cls_pid[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7442) 					class = parent->l3_cls[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7443) 					parent->l3_cls_refcnt[i]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7444) 					add_usr_cls = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7445) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7446) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7447) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7448) 				/* Program new user IP class */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7449) 				switch (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7450) 				case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7451) 					class = CLASS_CODE_USER_PROG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7452) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7453) 				case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7454) 					class = CLASS_CODE_USER_PROG2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7455) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7456) 				case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7457) 					class = CLASS_CODE_USER_PROG3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7458) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7459) 				case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7460) 					class = CLASS_CODE_USER_PROG4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7461) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7462) 				default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7463) 					class = CLASS_CODE_UNRECOG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7464) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7465) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7466) 				ret = tcam_user_ip_class_set(np, class, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7467) 							     uspec->proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7468) 							     uspec->tos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7469) 							     umask->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7470) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7471) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7473) 				ret = tcam_user_ip_class_enable(np, class, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7474) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7475) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7476) 				parent->l3_cls[i] = class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7477) 				parent->l3_cls_pid[i] = uspec->proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7478) 				parent->l3_cls_refcnt[i]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7479) 				add_usr_cls = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7480) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7481) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7482) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7483) 		if (!add_usr_cls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7484) 			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7485) 				    parent->index, __func__, uspec->proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7486) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7487) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7488) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7489) 		niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7490) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7491) 		if (!niu_ethflow_to_class(fsp->flow_type, &class))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7492) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7496) 	niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7498) 	idx = tcam_get_index(np, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7499) 	tp = &parent->tcam[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7501) 	memset(tp, 0, sizeof(*tp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7503) 	/* fill in the tcam key and mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7504) 	switch (fsp->flow_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7505) 	case TCP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7506) 	case UDP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7507) 	case SCTP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7508) 	case AH_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7509) 	case ESP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7510) 		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7511) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7512) 	case TCP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7513) 	case UDP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7514) 	case SCTP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7515) 	case AH_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7516) 	case ESP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7517) 		/* Not yet implemented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7518) 		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7519) 			    parent->index, __func__, fsp->flow_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7520) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7521) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7522) 	case IP_USER_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7523) 		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7524) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7525) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7526) 		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7527) 			    parent->index, __func__, fsp->flow_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7528) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7529) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7532) 	/* fill in the assoc data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7533) 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7534) 		tp->assoc_data = TCAM_ASSOCDATA_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7535) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7536) 		if (fsp->ring_cookie >= np->num_rx_rings) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7537) 			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7538) 				    parent->index, __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7539) 				    (long long)fsp->ring_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7540) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7541) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7542) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7543) 		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7544) 				  (fsp->ring_cookie <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7545) 				   TCAM_ASSOCDATA_OFFSET_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7546) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7548) 	err = tcam_write(np, idx, tp->key, tp->key_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7549) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7550) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7551) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7553) 	err = tcam_assoc_write(np, idx, tp->assoc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7554) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7555) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7556) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7559) 	/* mark the entry valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7560) 	tp->valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7561) 	np->clas.tcam_valid_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7562) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7563) 	niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7565) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7566) }
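
Insertion is normally driven from userspace via ETHTOOL_SRXCLSRLINS, e.g. "ethtool -U eth0 flow-type tcp4 dst-port 80 action 2 loc 0" (action -1 encodes RX_CLS_FLOW_DISC, i.e. drop). A minimal sketch of the raw request, with the interface, ring and TCAM location as assumed example values (htons() from <arpa/inet.h>, remaining plumbing as before); note that set mask bits mean "compare this bit", matching the key_mask translation above:

	/* Illustrative only: steer TCP/IPv4 with destination port 80 to ring 2 */
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXCLSRLINS,
		.fs = {
			.flow_type   = TCP_V4_FLOW,
			.ring_cookie = 2,	/* or RX_CLS_FLOW_DISC to drop */
			.location    = 0,	/* TCAM slot, < tcam_get_size() */
		},
	};

	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* compare all 16 bits */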
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7568) static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7570) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7571) 	struct niu_tcam_entry *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7572) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7573) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7574) 	u64 class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7575) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7577) 	if (loc >= tcam_get_size(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7578) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7580) 	niu_lock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7582) 	idx = tcam_get_index(np, loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7583) 	tp = &parent->tcam[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7585) 	/* if the entry is of a user-defined class, update its refcount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7586) 	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7587) 		TCAM_V4KEY0_CLASS_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7589) 	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7590) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7591) 		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7592) 			if (parent->l3_cls[i] == class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7593) 				parent->l3_cls_refcnt[i]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7594) 				if (!parent->l3_cls_refcnt[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7595) 					/* disable class */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7596) 					ret = tcam_user_ip_class_enable(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7597) 									class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7598) 									0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7599) 					if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7600) 						goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7601) 					parent->l3_cls[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7602) 					parent->l3_cls_pid[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7603) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7604) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7605) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7606) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7607) 		if (i == NIU_L3_PROG_CLS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7608) 			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7609) 				    parent->index, __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7610) 				    (unsigned long long)class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7611) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7612) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7613) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7614) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7616) 	ret = tcam_flush(np, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7617) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7618) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7620) 	/* invalidate the entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7621) 	tp->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7622) 	np->clas.tcam_valid_entries--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7623) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7624) 	niu_unlock_parent(np, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7626) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7627) }
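
Deletion is the mirror image: "ethtool -U eth0 delete 0" issues ETHTOOL_SRXCLSRLDEL with fs.location = 0, which ends up here. The l3_cls_refcnt[] bookkeeping ensures that a user-programmed IP class (CLASS_CODE_USER_PROG1..4, created by niu_add_ethtool_tcam_entry() above) is disabled in hardware only when the last rule referring to it is removed; rules in the fixed flow classes need no such unwinding.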
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7629) static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7631) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7632) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7634) 	switch (cmd->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7635) 	case ETHTOOL_SRXFH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7636) 		ret = niu_set_hash_opts(np, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7637) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7638) 	case ETHTOOL_SRXCLSRLINS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7639) 		ret = niu_add_ethtool_tcam_entry(np, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7640) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7641) 	case ETHTOOL_SRXCLSRLDEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7642) 		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7643) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7644) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7645) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7646) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7649) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7652) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7653) 	const char string[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7654) } niu_xmac_stat_keys[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7655) 	{ "tx_frames" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7656) 	{ "tx_bytes" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7657) 	{ "tx_fifo_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7658) 	{ "tx_overflow_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7659) 	{ "tx_max_pkt_size_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7660) 	{ "tx_underflow_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7661) 	{ "rx_local_faults" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7662) 	{ "rx_remote_faults" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7663) 	{ "rx_link_faults" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7664) 	{ "rx_align_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7665) 	{ "rx_frags" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7666) 	{ "rx_mcasts" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7667) 	{ "rx_bcasts" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7668) 	{ "rx_hist_cnt1" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7669) 	{ "rx_hist_cnt2" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7670) 	{ "rx_hist_cnt3" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7671) 	{ "rx_hist_cnt4" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7672) 	{ "rx_hist_cnt5" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7673) 	{ "rx_hist_cnt6" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7674) 	{ "rx_hist_cnt7" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7675) 	{ "rx_octets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7676) 	{ "rx_code_violations" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7677) 	{ "rx_len_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7678) 	{ "rx_crc_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7679) 	{ "rx_underflows" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7680) 	{ "rx_overflows" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7681) 	{ "pause_off_state" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7682) 	{ "pause_on_state" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7683) 	{ "pause_received" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7684) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7686) #define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7688) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7689) 	const char string[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7690) } niu_bmac_stat_keys[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7691) 	{ "tx_underflow_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7692) 	{ "tx_max_pkt_size_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7693) 	{ "tx_bytes" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7694) 	{ "tx_frames" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7695) 	{ "rx_overflows" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7696) 	{ "rx_frames" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7697) 	{ "rx_align_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7698) 	{ "rx_crc_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7699) 	{ "rx_len_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7700) 	{ "pause_off_state" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7701) 	{ "pause_on_state" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7702) 	{ "pause_received" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7703) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7705) #define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7707) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7708) 	const char string[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7709) } niu_rxchan_stat_keys[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7710) 	{ "rx_channel" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7711) 	{ "rx_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7712) 	{ "rx_bytes" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7713) 	{ "rx_dropped" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7714) 	{ "rx_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7715) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7717) #define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7719) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7720) 	const char string[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7721) } niu_txchan_stat_keys[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7722) 	{ "tx_channel" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7723) 	{ "tx_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7724) 	{ "tx_bytes" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7725) 	{ "tx_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7726) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7728) #define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
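
Taken together, these four key tables fix the "ethtool -S" layout for this driver: one MAC block (XMAC or BMAC, chosen by NIU_FLAGS_XMAC), followed by one niu_rxchan block per RX ring and one niu_txchan block per TX ring. niu_get_strings(), niu_get_sset_count() and niu_get_ethtool_stats() below all walk this same sequence, so the tables and the stat-filling code must be kept in lock-step.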
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7730) static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7732) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7733) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7735) 	if (stringset != ETH_SS_STATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7736) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7738) 	if (np->flags & NIU_FLAGS_XMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7739) 		memcpy(data, niu_xmac_stat_keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7740) 		       sizeof(niu_xmac_stat_keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7741) 		data += sizeof(niu_xmac_stat_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7742) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7743) 		memcpy(data, niu_bmac_stat_keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7744) 		       sizeof(niu_bmac_stat_keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7745) 		data += sizeof(niu_bmac_stat_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7747) 	for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7748) 		memcpy(data, niu_rxchan_stat_keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7749) 		       sizeof(niu_rxchan_stat_keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7750) 		data += sizeof(niu_rxchan_stat_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7752) 	for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7753) 		memcpy(data, niu_txchan_stat_keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7754) 		       sizeof(niu_txchan_stat_keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7755) 		data += sizeof(niu_txchan_stat_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7759) static int niu_get_sset_count(struct net_device *dev, int stringset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7761) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7763) 	if (stringset != ETH_SS_STATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7764) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7766) 	return (np->flags & NIU_FLAGS_XMAC ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7767) 		 NUM_XMAC_STAT_KEYS :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7768) 		 NUM_BMAC_STAT_KEYS) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7769) 		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7770) 		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7771) }
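
Worked example: on an XMAC port (29 entries in niu_xmac_stat_keys) with, say, 8 RX and 8 TX rings, this returns 29 + 8 * 5 + 8 * 4 = 101, and niu_get_ethtool_stats() below must then emit exactly 101 u64 values in string order: the struct niu_xmac_stats copy covers the MAC block, then each RX ring writes 5 words and each TX ring 4.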
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7773) static void niu_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7774) 				  struct ethtool_stats *stats, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7776) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7777) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7779) 	niu_sync_mac_stats(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7780) 	if (np->flags & NIU_FLAGS_XMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7781) 		memcpy(data, &np->mac_stats.xmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7782) 		       sizeof(struct niu_xmac_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7783) 		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7784) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7785) 		memcpy(data, &np->mac_stats.bmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7786) 		       sizeof(struct niu_bmac_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7787) 		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7789) 	for (i = 0; i < np->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7790) 		struct rx_ring_info *rp = &np->rx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7792) 		niu_sync_rx_discard_stats(np, rp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7794) 		data[0] = rp->rx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7795) 		data[1] = rp->rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7796) 		data[2] = rp->rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7797) 		data[3] = rp->rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7798) 		data[4] = rp->rx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7799) 		data += 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7800) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7801) 	for (i = 0; i < np->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7802) 		struct tx_ring_info *rp = &np->tx_rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7804) 		data[0] = rp->tx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7805) 		data[1] = rp->tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7806) 		data[2] = rp->tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7807) 		data[3] = rp->tx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7808) 		data += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7812) static u64 niu_led_state_save(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7814) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7815) 		return nr64_mac(XMAC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7816) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7817) 		return nr64_mac(BMAC_XIF_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7820) static void niu_led_state_restore(struct niu *np, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7822) 	if (np->flags & NIU_FLAGS_XMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7823) 		nw64_mac(XMAC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7824) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7825) 		nw64_mac(BMAC_XIF_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7828) static void niu_force_led(struct niu *np, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7830) 	u64 val, reg, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7832) 	if (np->flags & NIU_FLAGS_XMAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7833) 		reg = XMAC_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7834) 		bit = XMAC_CONFIG_FORCE_LED_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7835) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7836) 		reg = BMAC_XIF_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7837) 		bit = BMAC_XIF_CONFIG_LINK_LED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7840) 	val = nr64_mac(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7841) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7842) 		val |= bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7843) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7844) 		val &= ~bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7845) 	nw64_mac(reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7848) static int niu_set_phys_id(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7849) 			   enum ethtool_phys_id_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7852) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7854) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7855) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7857) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7858) 	case ETHTOOL_ID_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7859) 		np->orig_led_state = niu_led_state_save(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7860) 		return 1;	/* cycle on/off once per second */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7862) 	case ETHTOOL_ID_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7863) 		niu_force_led(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7864) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7866) 	case ETHTOOL_ID_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7867) 		niu_force_led(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7868) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7870) 	case ETHTOOL_ID_INACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7871) 		niu_led_state_restore(np, np->orig_led_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7872) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7874) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7875) }
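
This is the handler behind "ethtool -p eth0 [N]": returning 1 from the ETHTOOL_ID_ACTIVE case asks the ethtool core to keep calling back with ETHTOOL_ID_ON / ETHTOOL_ID_OFF so the LED blinks at one cycle per second, with niu_force_led() doing a read-modify-write of the FORCE_LED/LINK_LED bit; ETHTOOL_ID_INACTIVE then restores the configuration saved when identification began.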
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7877) static const struct ethtool_ops niu_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7878) 	.get_drvinfo		= niu_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7879) 	.get_link		= ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7880) 	.get_msglevel		= niu_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7881) 	.set_msglevel		= niu_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7882) 	.nway_reset		= niu_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7883) 	.get_eeprom_len		= niu_get_eeprom_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7884) 	.get_eeprom		= niu_get_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7885) 	.get_strings		= niu_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7886) 	.get_sset_count		= niu_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7887) 	.get_ethtool_stats	= niu_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7888) 	.set_phys_id		= niu_set_phys_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7889) 	.get_rxnfc		= niu_get_nfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7890) 	.set_rxnfc		= niu_set_nfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7891) 	.get_link_ksettings	= niu_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7892) 	.set_link_ksettings	= niu_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7893) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7895) static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7896) 			      int ldg, int ldn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7898) 	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7899) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7900) 	if (ldn < 0 || ldn > LDN_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7901) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7903) 	parent->ldg_map[ldn] = ldg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7905) 	if (np->parent->plat_type == PLAT_TYPE_NIU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7906) 		/* On N2 NIU, the ldn-->ldg assignments are set up and fixed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7907) 		 * the firmware, and we're not supposed to change them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7908) 		 * Validate the mapping, because if it's wrong we probably
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7909) 		 * won't get any interrupts and that's painful to debug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7910) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7911) 		if (nr64(LDG_NUM(ldn)) != ldg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7912) 			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7913) 				np->port, ldn, ldg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7914) 				(unsigned long long) nr64(LDG_NUM(ldn)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7915) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7916) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7917) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7918) 		nw64(LDG_NUM(ldn), ldg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7920) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7923) static int niu_set_ldg_timer_res(struct niu *np, int res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7925) 	if (res < 0 || res > LDG_TIMER_RES_VAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7926) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7929) 	nw64(LDG_TIMER_RES, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7931) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7934) static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7936) 	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7937) 	    (func < 0 || func > 3) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7938) 	    (vector < 0 || vector > 0x1f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7939) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7941) 	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7943) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7946) static int niu_pci_eeprom_read(struct niu *np, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7948) 	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7949) 				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7950) 	int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7952) 	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7953) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7955) 	frame = frame_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7956) 	nw64(ESPC_PIO_STAT, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7957) 	limit = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7958) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7959) 		udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7960) 		frame = nr64(ESPC_PIO_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7961) 		if (frame & ESPC_PIO_STAT_READ_END)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7962) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7963) 	} while (limit--);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7964) 	if (!(frame & ESPC_PIO_STAT_READ_END)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7965) 		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7966) 			(unsigned long long) frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7967) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7968) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7970) 	frame = frame_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7971) 	nw64(ESPC_PIO_STAT, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7972) 	limit = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7973) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7974) 		udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7975) 		frame = nr64(ESPC_PIO_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7976) 		if (frame & ESPC_PIO_STAT_READ_END)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7977) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7978) 	} while (limit--);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7979) 	if (!(frame & ESPC_PIO_STAT_READ_END)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7980) 		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7981) 			(unsigned long long) frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7982) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7985) 	frame = nr64(ESPC_PIO_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7986) 	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7987) }
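
Note that the READ_START/poll sequence is issued twice before the data byte is finally sampled from ESPC_PIO_STAT_DATA; a negative return means the 64 x 5 us poll budget expired, and the 16-bit helpers below simply propagate that error code.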
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7989) static int niu_pci_eeprom_read16(struct niu *np, u32 off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7991) 	int err = niu_pci_eeprom_read(np, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7992) 	u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7994) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7995) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7996) 	val = (err << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7997) 	err = niu_pci_eeprom_read(np, off + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7998) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7999) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8000) 	val |= (err & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8002) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8005) static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8007) 	int err = niu_pci_eeprom_read(np, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8008) 	u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8010) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8011) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8013) 	val = (err & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8014) 	err = niu_pci_eeprom_read(np, off + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8015) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8016) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8018) 	val |= (err & 0xff) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8020) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8021) }
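
The two 16-bit helpers differ only in byte order: if the EEPROM bytes at off and off + 1 are 0x12 and 0x34, niu_pci_eeprom_read16() returns 0x1234 (first byte in the high half) while niu_pci_eeprom_read16_swp() returns 0x3412.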
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8022) 
static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
				    int namebuf_len)
{
	int i;

	for (i = 0; i < namebuf_len; i++) {
		int err = niu_pci_eeprom_read(np, off + i);
		if (err < 0)
			return err;
		*namebuf++ = err;
		if (!err)
			break;
	}
	if (i >= namebuf_len)
		return -EINVAL;

	return i + 1;
}

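/* Scan the VPD version string for an "FCode x.y" substring and, if
 * the firmware revision meets the driver's minimum, mark the VPD
 * contents as usable via NIU_FLAGS_VPD_VALID.
 */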
static void niu_vpd_parse_version(struct niu *np)
{
	struct niu_vpd *vpd = &np->vpd;
	int len = strlen(vpd->version) + 1;
	const char *s = vpd->version;
	int i;

	for (i = 0; i < len - 5; i++) {
		if (!strncmp(s + i, "FCode ", 6))
			break;
	}
	if (i >= len - 5)
		return;

	s += i + 5;
	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
		     vpd->fcode_major, vpd->fcode_minor);
	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
		np->flags |= NIU_FLAGS_VPD_VALID;
}

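/* Walk the VPD property records between 'start' and 'end'.  Each
 * record encodes its own length, the length of its property data and
 * a NUL-terminated property name ahead of the data itself.  Returns 1
 * once every property of interest has been captured, 0 if the region
 * is exhausted first, or a negative errno on failure.
 */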
/* ESPC_PIO_EN_ENABLE must be set */
static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
{
	unsigned int found_mask = 0;
#define FOUND_MASK_MODEL	0x00000001
#define FOUND_MASK_BMODEL	0x00000002
#define FOUND_MASK_VERS		0x00000004
#define FOUND_MASK_MAC		0x00000008
#define FOUND_MASK_NMAC		0x00000010
#define FOUND_MASK_PHY		0x00000020
#define FOUND_MASK_ALL		0x0000003f

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
	while (start < end) {
		int len, err, prop_len;
		char namebuf[64];
		u8 *prop_buf;
		int max_len;

		if (found_mask == FOUND_MASK_ALL) {
			niu_vpd_parse_version(np);
			return 1;
		}

		err = niu_pci_eeprom_read(np, start + 2);
		if (err < 0)
			return err;
		len = err;
		start += 3;

		prop_len = niu_pci_eeprom_read(np, start + 4);
		if (prop_len < 0)
			return prop_len;
		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
		if (err < 0)
			return err;

		prop_buf = NULL;
		max_len = 0;
		if (!strcmp(namebuf, "model")) {
			prop_buf = np->vpd.model;
			max_len = NIU_VPD_MODEL_MAX;
			found_mask |= FOUND_MASK_MODEL;
		} else if (!strcmp(namebuf, "board-model")) {
			prop_buf = np->vpd.board_model;
			max_len = NIU_VPD_BD_MODEL_MAX;
			found_mask |= FOUND_MASK_BMODEL;
		} else if (!strcmp(namebuf, "version")) {
			prop_buf = np->vpd.version;
			max_len = NIU_VPD_VERSION_MAX;
			found_mask |= FOUND_MASK_VERS;
		} else if (!strcmp(namebuf, "local-mac-address")) {
			prop_buf = np->vpd.local_mac;
			max_len = ETH_ALEN;
			found_mask |= FOUND_MASK_MAC;
		} else if (!strcmp(namebuf, "num-mac-addresses")) {
			prop_buf = &np->vpd.mac_num;
			max_len = 1;
			found_mask |= FOUND_MASK_NMAC;
		} else if (!strcmp(namebuf, "phy-type")) {
			prop_buf = np->vpd.phy_type;
			max_len = NIU_VPD_PHY_TYPE_MAX;
			found_mask |= FOUND_MASK_PHY;
		}

		if (max_len && prop_len > max_len) {
			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
			return -EINVAL;
		}

		if (prop_buf) {
			u32 off = start + 5 + err;
			int i;

			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
				     namebuf, prop_len);
			for (i = 0; i < prop_len; i++) {
				err = niu_pci_eeprom_read(np, off + i);
				if (err < 0)
					return err;
				*prop_buf++ = err;
			}
		}

		start += len;
	}

	return 0;
}

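/* Locate each VPD data block (signature 0x90) starting at 'start'
 * and feed its property area to niu_pci_vpd_scan_props().  Stops as
 * soon as a scan reports that every property of interest was found.
 */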
/* ESPC_PIO_EN_ENABLE must be set */
static int niu_pci_vpd_fetch(struct niu *np, u32 start)
{
	u32 offset;
	int err;

	err = niu_pci_eeprom_read16_swp(np, start + 1);
	if (err < 0)
		return err;

	offset = err + 3;

	while (start + offset < ESPC_EEPROM_SIZE) {
		u32 here = start + offset;
		u32 end;

		err = niu_pci_eeprom_read(np, here);
		if (err < 0)
			return err;
		if (err != 0x90)
			return -EINVAL;

		err = niu_pci_eeprom_read16_swp(np, here + 1);
		if (err < 0)
			return err;

		here = start + offset + 3;
		end = start + offset + err;

		offset += err;

		err = niu_pci_vpd_scan_props(np, here, end);
		if (err < 0)
			return err;
		/* ret == 1 is not an error */
		if (err == 1)
			return 0;
	}
	return 0;
}

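/* Find the EEPROM offset of the VPD area by walking the PCI
 * expansion-ROM image chain: verify the 0x55aa ROM signature and the
 * "PCIR" data-structure signature, skip non-OBP images (stepping in
 * 512-byte units), then follow the pointer to the 0x82 VPD block.
 * Returns 0 if no VPD area is found.
 */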
/* ESPC_PIO_EN_ENABLE must be set */
static u32 niu_pci_vpd_offset(struct niu *np)
{
	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
	int err;

	while (start < end) {
		ret = start;

		/* ROM header signature?  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x55aa)
			return 0;

		/* Apply offset to PCI data structure.  */
		err = niu_pci_eeprom_read16(np, start + 23);
		if (err < 0)
			return 0;
		start += err;

		/* Check for "PCIR" signature.  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x5043)
			return 0;
		err = niu_pci_eeprom_read16(np, start +  2);
		if (err != 0x4952)
			return 0;

		/* Check for OBP image type.  */
		err = niu_pci_eeprom_read(np, start + 20);
		if (err < 0)
			return 0;
		if (err != 0x01) {
			err = niu_pci_eeprom_read(np, ret + 2);
			if (err < 0)
				return 0;

			start = ret + (err * 512);
			continue;
		}

		err = niu_pci_eeprom_read16_swp(np, start + 8);
		if (err < 0)
			return 0;	/* u32 offset, not an errno, as on the other error paths */
		ret += err;

		err = niu_pci_eeprom_read(np, ret + 0);
		if (err != 0x82)
			return 0;

		return ret;
	}

	return 0;
}

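/* Translate the VPD "phy-type" string into the matching NIU_FLAGS_*
 * link flags and MAC transceiver mode.  Returns -EINVAL for an
 * unrecognized string.
 */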
static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
{
	if (!strcmp(phy_prop, "mif")) {
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
	} else if (!strcmp(phy_prop, "xgf")) {
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "pcs")) {
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
	} else if (!strcmp(phy_prop, "xgc")) {
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
		/* 10G Serdes or 1G Serdes, default to 10G */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else {
		return -EINVAL;
	}
	return 0;
}

static int niu_pci_vpd_get_nports(struct niu *np)
{
	int ports = 0;

	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
		ports = 4;
	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
		ports = 2;
	}

	return ports;
}

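/* Sanity-check the fetched VPD contents.  An invalid MAC address or
 * an unparseable phy-type string clears NIU_FLAGS_VPD_VALID so the
 * caller falls back to the SPROM; known board models get their link
 * flags forced directly.  The port number is then added into the low
 * MAC byte, with carry into the next byte.
 */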
static void niu_pci_vpd_validate(struct niu *np)
{
	struct net_device *dev = np->dev;
	struct niu_vpd *vpd = &np->vpd;
	u8 val8;

	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");

		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_PCS;
		if (np->port > 1) {
			np->flags |= NIU_FLAGS_FIBER;
			np->flags &= ~NIU_FLAGS_10G;
		}
		if (np->flags & NIU_FLAGS_10G)
			np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, "Illegal phy string [%s]\n",
			np->vpd.phy_type);
		dev_err(np->device, "Falling back to SPROM\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);

	val8 = dev->dev_addr[5];
	dev->dev_addr[5] += np->port;
	if (dev->dev_addr[5] < val8)
		dev->dev_addr[4]++;
}

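/* Probe the SPROM when no valid VPD is available: verify the 8-bit
 * checksum (expected 0xab), decode the per-port PHY type, extract the
 * MAC address (adjusted by port number) and the model and board-model
 * strings, and record the advertised MAC count.
 */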
static int niu_pci_probe_sprom(struct niu *np)
{
	struct net_device *dev = np->dev;
	int len, i;
	u64 val, sum;
	u8 val8;

	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
	len = val / 4;

	np->eeprom_len = len;

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Image size %llu\n", (unsigned long long)val);

	sum = 0;
	for (i = 0; i < len; i++) {
		val = nr64(ESPC_NCR(i));
		sum += (val >>  0) & 0xff;
		sum += (val >>  8) & 0xff;
		sum += (val >> 16) & 0xff;
		sum += (val >> 24) & 0xff;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
	if ((sum & 0xff) != 0xab) {
		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
		return -EINVAL;
	}

	val = nr64(ESPC_PHY_TYPE);
	switch (np->port) {
	case 0:
		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
			ESPC_PHY_TYPE_PORT0_SHIFT;
		break;
	case 1:
		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
			ESPC_PHY_TYPE_PORT1_SHIFT;
		break;
	case 2:
		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
			ESPC_PHY_TYPE_PORT2_SHIFT;
		break;
	case 3:
		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
			ESPC_PHY_TYPE_PORT3_SHIFT;
		break;
	default:
		dev_err(np->device, "Bogus port number %u\n",
			np->port);
		return -EINVAL;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: PHY type %x\n", val8);

	switch (val8) {
	case ESPC_PHY_TYPE_1G_COPPER:
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
		break;

	case ESPC_PHY_TYPE_1G_FIBER:
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
		break;

	case ESPC_PHY_TYPE_10G_COPPER:
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	case ESPC_PHY_TYPE_10G_FIBER:
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	default:
		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
		return -EINVAL;
	}

	val = nr64(ESPC_MAC_ADDR0);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
	dev->dev_addr[0] = (val >>  0) & 0xff;
	dev->dev_addr[1] = (val >>  8) & 0xff;
	dev->dev_addr[2] = (val >> 16) & 0xff;
	dev->dev_addr[3] = (val >> 24) & 0xff;

	val = nr64(ESPC_MAC_ADDR1);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
	dev->dev_addr[4] = (val >>  0) & 0xff;
	dev->dev_addr[5] = (val >>  8) & 0xff;

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
			dev->dev_addr);
		return -EINVAL;
	}

	val8 = dev->dev_addr[5];
	dev->dev_addr[5] += np->port;
	if (dev->dev_addr[5] < val8)
		dev->dev_addr[4]++;

	val = nr64(ESPC_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 8 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));

		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.model[val] = '\0';

	val = nr64(ESPC_BD_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 4 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));

		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.board_model[val] = '\0';

	np->vpd.mac_num =
		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);

	return 0;
}

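/* Determine how many ports this parent device exposes (ports 0 and 1
 * use the XMAC) and verify that np->port is within range.
 */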
static int niu_get_and_validate_port(struct niu *np)
{
	struct niu_parent *parent = np->parent;

	if (np->port <= 1)
		np->flags |= NIU_FLAGS_XMAC;

	if (!parent->num_ports) {
		if (parent->plat_type == PLAT_TYPE_NIU) {
			parent->num_ports = 2;
		} else {
			parent->num_ports = niu_pci_vpd_get_nports(np);
			if (!parent->num_ports) {
				/* Fall back to SPROM as last resort.
				 * This will fail on most cards.
				 */
				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
					ESPC_NUM_PORTS_MACS_VAL;

				/* All of the current probing methods fail on
				 * Maramba on-board parts.
				 */
				if (!parent->num_ports)
					parent->num_ports = 4;
			}
		}
	}

	if (np->port >= parent->num_ports)
		return -ENODEV;

	return 0;
}

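/* Record a PHY discovered during the MDIO scan.  The two ID halves
 * are combined and matched against the supported device IDs
 * (BCM8704/8706 or MRVL88X2011 for PMA/PMD and PCS, BCM5464R for
 * MII); anything else is ignored.
 */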
static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
{
	u32 id = (dev_id_1 << 16) | dev_id_2;
	u8 idx;

	if (dev_id_1 < 0 || dev_id_2 < 0)
		return 0;
	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
		 * test covers the 8706 as well.
		 */
		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
			return 0;
	} else {
		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
			return 0;
	}

	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
		parent->index, id,
		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
		type == PHY_TYPE_PCS ? "PCS" : "MII",
		phy_port);

	if (p->cur[type] >= NIU_MAX_PORTS) {
		pr_err("Too many PHY ports\n");
		return -EINVAL;
	}
	idx = p->cur[type];
	p->phy_id[type][idx] = id;
	p->phy_port[type][idx] = phy_port;
	p->cur[type] = idx + 1;
	return 0;
}

static int port_has_10g(struct phy_probe_info *p, int port)
{
	int i;

	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
			return 1;
	}
	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
		if (p->phy_port[PHY_TYPE_PCS][i] == port)
			return 1;
	}

	return 0;
}

static int count_10g_ports(struct phy_probe_info *p, int *lowest)
{
	int port, cnt;

	cnt = 0;
	*lowest = 32;
	for (port = 8; port < 32; port++) {
		if (port_has_10g(p, port)) {
			if (!cnt)
				*lowest = port;
			cnt++;
		}
	}

	return cnt;
}

static int count_1g_ports(struct phy_probe_info *p, int *lowest)
{
	*lowest = 32;
	if (p->cur[PHY_TYPE_MII])
		*lowest = p->phy_port[PHY_TYPE_MII][0];

	return p->cur[PHY_TYPE_MII];
}

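/* On N2/NIU platforms, divide 16 RX and 16 TX channels evenly across
 * the ports.
 */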
static void niu_n2_divide_channels(struct niu_parent *parent)
{
	int num_ports = parent->num_ports;
	int i;

	for (i = 0; i < num_ports; i++) {
		parent->rxchan_per_port[i] = (16 / num_ports);
		parent->txchan_per_port[i] = (16 / num_ports);

		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
	}
}

static void niu_divide_channels(struct niu_parent *parent,
				int num_10g, int num_1g)
{
	int num_ports = parent->num_ports;
	int rx_chans_per_10g, rx_chans_per_1g;
	int tx_chans_per_10g, tx_chans_per_1g;
	int i, tot_rx, tot_tx;

	if (!num_10g || !num_1g) {
		rx_chans_per_10g = rx_chans_per_1g =
			(NIU_NUM_RXCHAN / num_ports);
		tx_chans_per_10g = tx_chans_per_1g =
			(NIU_NUM_TXCHAN / num_ports);
	} else {
		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
		rx_chans_per_10g = (NIU_NUM_RXCHAN -
				    (rx_chans_per_1g * num_1g)) /
			num_10g;

		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
		tx_chans_per_10g = (NIU_NUM_TXCHAN -
				    (tx_chans_per_1g * num_1g)) /
			num_10g;
	}

	tot_rx = tot_tx = 0;
	for (i = 0; i < num_ports; i++) {
		int type = phy_decode(parent->port_phy, i);

		if (type == PORT_TYPE_10G) {
			parent->rxchan_per_port[i] = rx_chans_per_10g;
			parent->txchan_per_port[i] = tx_chans_per_10g;
		} else {
			parent->rxchan_per_port[i] = rx_chans_per_1g;
			parent->txchan_per_port[i] = tx_chans_per_1g;
		}
		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
		tot_rx += parent->rxchan_per_port[i];
		tot_tx += parent->txchan_per_port[i];
	}

	if (tot_rx > NIU_NUM_RXCHAN) {
		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
		       parent->index, tot_rx);
		for (i = 0; i < num_ports; i++)
			parent->rxchan_per_port[i] = 1;
	}
	if (tot_tx > NIU_NUM_TXCHAN) {
		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
		       parent->index, tot_tx);
		for (i = 0; i < num_ports; i++)
			parent->txchan_per_port[i] = 1;
	}
	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
			parent->index, tot_rx, tot_tx);
	}
}

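/* Partition the RDC tables evenly across ports and populate each
 * table's slots with that port's RX DMA channels, cycling through the
 * channels as needed.  The first channel of each port becomes its
 * default RDC.
 */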
static void niu_divide_rdc_groups(struct niu_parent *parent,
				  int num_10g, int num_1g)
{
	int i, num_ports = parent->num_ports;
	int rdc_group, rdc_groups_per_port;
	int rdc_channel_base;

	rdc_group = 0;
	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;

	rdc_channel_base = 0;

	for (i = 0; i < num_ports; i++) {
		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
		int grp, num_channels = parent->rxchan_per_port[i];
		int this_channel_offset;

		tp->first_table_num = rdc_group;
		tp->num_tables = rdc_groups_per_port;
		this_channel_offset = 0;
		for (grp = 0; grp < tp->num_tables; grp++) {
			struct rdc_table *rt = &tp->tables[grp];
			int slot;

			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
				parent->index, i, tp->first_table_num + grp);
			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
				rt->rxdma_channel[slot] =
					rdc_channel_base + this_channel_offset;

				pr_cont("%d ", rt->rxdma_channel[slot]);

				if (++this_channel_offset == num_channels)
					this_channel_offset = 0;
			}
			pr_cont("]\n");
		}

		parent->rdc_default[i] = rdc_channel_base;

		rdc_channel_base += num_channels;
		rdc_group += rdc_groups_per_port;
	}
}

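/* Scan MDIO ports 8-31 for PMA/PMD, PCS and MII PHY IDs, recording
 * every supported PHY found.  Runs under the parent lock.
 */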
static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
			       struct phy_probe_info *info)
{
	unsigned long flags;
	int port, err;

	memset(info, 0, sizeof(*info));

	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
	niu_lock_parent(np, flags);
	err = 0;
	for (port = 8; port < 32; port++) {
		int dev_id_1, dev_id_2;

		dev_id_1 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PMA_PMD);
		if (err)
			break;
		dev_id_1 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PCS);
		if (err)
			break;
		dev_id_1 = mii_read(np, port, MII_PHYSID1);
		dev_id_2 = mii_read(np, port, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_MII);
		if (err)
			break;
	}
	niu_unlock_parent(np, flags);

	return err;
}

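/* Work out the port/PHY layout for this parent.  Known board models
 * and Serdes-only NIU configurations are handled directly; otherwise
 * the MDIO bus is probed and the (num_10g, num_1g) combination is
 * decoded into a port_phy map.
 */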
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8789) static int walk_phys(struct niu *np, struct niu_parent *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8791) 	struct phy_probe_info *info = &parent->phy_probe_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8792) 	int lowest_10g, lowest_1g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8793) 	int num_10g, num_1g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8794) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8795) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8797) 	num_10g = num_1g = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8799) 	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8800) 	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8801) 		num_10g = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8802) 		num_1g = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8803) 		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8804) 		parent->num_ports = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8805) 		val = (phy_encode(PORT_TYPE_1G, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8806) 		       phy_encode(PORT_TYPE_1G, 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8807) 		       phy_encode(PORT_TYPE_1G, 2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8808) 		       phy_encode(PORT_TYPE_1G, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8809) 	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8810) 		num_10g = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8811) 		num_1g = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8812) 		parent->num_ports = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8813) 		val = (phy_encode(PORT_TYPE_10G, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8814) 		       phy_encode(PORT_TYPE_10G, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8815) 	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8816) 		   (parent->plat_type == PLAT_TYPE_NIU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8817) 		/* this is the Monza case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8818) 		if (np->flags & NIU_FLAGS_10G) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8819) 			val = (phy_encode(PORT_TYPE_10G, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8820) 			       phy_encode(PORT_TYPE_10G, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8821) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8822) 			val = (phy_encode(PORT_TYPE_1G, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8823) 			       phy_encode(PORT_TYPE_1G, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8824) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8825) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8826) 		err = fill_phy_probe_info(np, parent, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8827) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8828) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8830) 		num_10g = count_10g_ports(info, &lowest_10g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8831) 		num_1g = count_1g_ports(info, &lowest_1g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8832) 
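		/* Pack the two counts one nibble each so the port mix can
		 * be matched in a single switch: high nibble is the number
		 * of 10G ports, low nibble the number of 1G ports
		 * (e.g. 0x24 = two 10G plus four 1G PHYs).
		 */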
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8833) 		switch ((num_10g << 4) | num_1g) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8834) 		case 0x24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8835) 			if (lowest_1g == 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8836) 				parent->plat_type = PLAT_TYPE_VF_P0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8837) 			else if (lowest_1g == 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8838) 				parent->plat_type = PLAT_TYPE_VF_P1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8839) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8840) 				goto unknown_vg_1g_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8842) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8843) 		case 0x22:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8844) 			val = (phy_encode(PORT_TYPE_10G, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8845) 			       phy_encode(PORT_TYPE_10G, 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8846) 			       phy_encode(PORT_TYPE_1G, 2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8847) 			       phy_encode(PORT_TYPE_1G, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8848) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8850) 		case 0x20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8851) 			val = (phy_encode(PORT_TYPE_10G, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8852) 			       phy_encode(PORT_TYPE_10G, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8853) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8855) 		case 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8856) 			val = phy_encode(PORT_TYPE_10G, np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8857) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8859) 		case 0x14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8860) 			if (lowest_1g == 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8861) 				parent->plat_type = PLAT_TYPE_VF_P0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8862) 			else if (lowest_1g == 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8863) 				parent->plat_type = PLAT_TYPE_VF_P1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8864) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8865) 				goto unknown_vg_1g_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8867) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8868) 		case 0x13:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8869) 			if ((lowest_10g & 0x7) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8870) 				val = (phy_encode(PORT_TYPE_10G, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8871) 				       phy_encode(PORT_TYPE_1G, 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8872) 				       phy_encode(PORT_TYPE_1G, 2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8873) 				       phy_encode(PORT_TYPE_1G, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8874) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8875) 				val = (phy_encode(PORT_TYPE_1G, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8876) 				       phy_encode(PORT_TYPE_10G, 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8877) 				       phy_encode(PORT_TYPE_1G, 2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8878) 				       phy_encode(PORT_TYPE_1G, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8879) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8881) 		case 0x04:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8882) 			if (lowest_1g == 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8883) 				parent->plat_type = PLAT_TYPE_VF_P0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8884) 			else if (lowest_1g == 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8885) 				parent->plat_type = PLAT_TYPE_VF_P1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8886) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8887) 				goto unknown_vg_1g_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8889) 			val = (phy_encode(PORT_TYPE_1G, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8890) 			       phy_encode(PORT_TYPE_1G, 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8891) 			       phy_encode(PORT_TYPE_1G, 2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8892) 			       phy_encode(PORT_TYPE_1G, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8893) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8895) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8896) 			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8897) 			       num_10g, num_1g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8898) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8899) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8902) 	parent->port_phy = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8904) 	if (parent->plat_type == PLAT_TYPE_NIU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8905) 		niu_n2_divide_channels(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8906) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8907) 		niu_divide_channels(parent, num_10g, num_1g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8909) 	niu_divide_rdc_groups(parent, num_10g, num_1g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8911) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8913) unknown_vg_1g_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8914) 	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8915) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8918) static int niu_probe_ports(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8920) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8921) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8922) 
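	/* port_phy is cached in the shared parent, so the PHY walk and
	 * the LDG timer/interrupt quiescing below run only once per card,
	 * on whichever port probes first.
	 */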
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8923) 	if (parent->port_phy == PORT_PHY_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8924) 		err = walk_phys(np, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8925) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8926) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8928) 		niu_set_ldg_timer_res(np, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8929) 		for (i = 0; i <= LDN_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8930) 			niu_ldn_irq_enable(np, i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8933) 	if (parent->port_phy == PORT_PHY_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8934) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8936) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8939) static int niu_classifier_swstate_init(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8941) 	struct niu_classifier *cp = &np->clas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8942) 
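	/* Each port gets an equal share of the TCAM entries; h1_init and
	 * h2_init look to be the seed values later programmed into the
	 * H1/H2 flow-hash registers by the FFLP setup code.
	 */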
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8943) 	cp->tcam_top = (u16) np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8944) 	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8945) 	cp->h1_init = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8946) 	cp->h2_init = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8948) 	return fflp_early_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8951) static void niu_link_config_init(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8953) 	struct niu_link_config *lp = &np->link_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8954) 
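	/* Advertise every speed/duplex the MAC can handle and default to
	 * autonegotiation; the active_* fields stay invalid until a link
	 * is actually established.
	 */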
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8955) 	lp->advertising = (ADVERTISED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8956) 			   ADVERTISED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8957) 			   ADVERTISED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8958) 			   ADVERTISED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8959) 			   ADVERTISED_1000baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8960) 			   ADVERTISED_1000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8961) 			   ADVERTISED_10000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8962) 			   ADVERTISED_Autoneg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8963) 	lp->speed = lp->active_speed = SPEED_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8964) 	lp->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8965) 	lp->active_duplex = DUPLEX_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8966) 	lp->autoneg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8967) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8968) 	lp->loopback_mode = LOOPBACK_MAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8969) 	lp->active_speed = SPEED_10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8970) 	lp->active_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8971) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8972) 	lp->loopback_mode = LOOPBACK_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8973) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8976) static int niu_init_mac_ipp_pcs_base(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8978) 	switch (np->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8979) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8980) 		np->mac_regs = np->regs + XMAC_PORT0_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8981) 		np->ipp_off  = 0x00000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8982) 		np->pcs_off  = 0x04000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8983) 		np->xpcs_off = 0x02000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8984) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8986) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8987) 		np->mac_regs = np->regs + XMAC_PORT1_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8988) 		np->ipp_off  = 0x08000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8989) 		np->pcs_off  = 0x0a000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8990) 		np->xpcs_off = 0x08000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8991) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8993) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8994) 		np->mac_regs = np->regs + BMAC_PORT2_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8995) 		np->ipp_off  = 0x04000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8996) 		np->pcs_off  = 0x0e000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8997) 		np->xpcs_off = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8998) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9000) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9001) 		np->mac_regs = np->regs + BMAC_PORT3_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9002) 		np->ipp_off  = 0x0c000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9003) 		np->pcs_off  = 0x12000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9004) 		np->xpcs_off = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9005) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9007) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9008) 		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9009) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9010) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9012) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9015) static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9017) 	struct msix_entry msi_vec[NIU_NUM_LDG];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9018) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9019) 	struct pci_dev *pdev = np->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9020) 	int i, num_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9021) 	u8 first_ldg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9022) 
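	/* The chip exposes NIU_NUM_LDG logical device groups in total;
	 * give this port an equal, contiguous slice of them.
	 */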
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9023) 	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9024) 	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9025) 		ldg_num_map[i] = first_ldg + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9026) 
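	/* Budget one vector per RX and per TX channel plus the MAC;
	 * port 0 additionally owns the MIF and SYSERR sources (see the
	 * LDG ordering comment in niu_ldg_init).
	 */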
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9027) 	num_irqs = (parent->rxchan_per_port[np->port] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9028) 		    parent->txchan_per_port[np->port] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9029) 		    (np->port == 0 ? 3 : 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9030) 	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9032) 	for (i = 0; i < num_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9033) 		msi_vec[i].vector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9034) 		msi_vec[i].entry = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9037) 	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9038) 	if (num_irqs < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9039) 		np->flags &= ~NIU_FLAGS_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9040) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9043) 	np->flags |= NIU_FLAGS_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9044) 	for (i = 0; i < num_irqs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9045) 		np->ldg[i].irq = msi_vec[i].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9046) 	np->num_ldg = num_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9049) static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9051) #ifdef CONFIG_SPARC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9052) 	struct platform_device *op = np->op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9053) 	const u32 *int_prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9054) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9055) 
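	/* The OF 'interrupts' property carries the LDG numbers in the
	 * same order as the Linux IRQs in archdata, so one walk fills
	 * both tables.
	 */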
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9056) 	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9057) 	if (!int_prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9058) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9060) 	for (i = 0; i < op->archdata.num_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9061) 		ldg_num_map[i] = int_prop[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9062) 		np->ldg[i].irq = op->archdata.irqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9065) 	np->num_ldg = op->archdata.num_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9067) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9068) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9069) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9070) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9073) static int niu_ldg_init(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9075) 	struct niu_parent *parent = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9076) 	u8 ldg_num_map[NIU_NUM_LDG];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9077) 	int first_chan, num_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9078) 	int i, err, ldg_rotor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9079) 	u8 port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9081) 	np->num_ldg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9082) 	np->ldg[0].irq = np->dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9083) 	if (parent->plat_type == PLAT_TYPE_NIU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9084) 		err = niu_n2_irq_init(np, ldg_num_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9085) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9086) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9087) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9088) 		niu_try_msix(np, ldg_num_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9090) 	port = np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9091) 	for (i = 0; i < np->num_ldg; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9092) 		struct niu_ldg *lp = &np->ldg[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9094) 		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9096) 		lp->np = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9097) 		lp->ldg_num = ldg_num_map[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9098) 		lp->timer = 2; /* XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9100) 		/* On N2 NIU the firmware has already set up the SID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9101) 		 * mappings so that they route each LDG to the proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9102) 		 * interrupt in the NCU interrupt table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9103) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9104) 		if (np->parent->plat_type != PLAT_TYPE_NIU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9105) 			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9106) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9107) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9108) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9111) 	/* We adopt the LDG assignment ordering used by the N2 NIU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9112) 	 * 'interrupt' properties because that simplifies a lot of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9113) 	 * things.  This ordering is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9114) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9115) 	 *	MAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9116) 	 *	MIF	(if port zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9117) 	 *	SYSERR	(if port zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9118) 	 *	RX channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9119) 	 *	TX channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9120) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9121) 
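	/* ldg_rotor spreads the LDNs round-robin over however many LDGs
	 * (i.e. interrupt vectors) we actually obtained.
	 */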
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9122) 	ldg_rotor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9124) 	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9125) 				  LDN_MAC(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9126) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9127) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9129) 	ldg_rotor++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9130) 	if (ldg_rotor == np->num_ldg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9131) 		ldg_rotor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9133) 	if (port == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9134) 		err = niu_ldg_assign_ldn(np, parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9135) 					 ldg_num_map[ldg_rotor],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9136) 					 LDN_MIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9137) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9138) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9140) 		ldg_rotor++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9141) 		if (ldg_rotor == np->num_ldg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9142) 			ldg_rotor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9144) 		err = niu_ldg_assign_ldn(np, parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9145) 					 ldg_num_map[ldg_rotor],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9146) 					 LDN_DEVICE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9147) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9148) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9150) 		ldg_rotor++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9151) 		if (ldg_rotor == np->num_ldg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9152) 			ldg_rotor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9154) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9155) 
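	/* RX DMA channels are numbered globally across the card, so skip
	 * over the channels owned by lower-numbered ports first.
	 */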
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9156) 	first_chan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9157) 	for (i = 0; i < port; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9158) 		first_chan += parent->rxchan_per_port[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9159) 	num_chan = parent->rxchan_per_port[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9161) 	for (i = first_chan; i < (first_chan + num_chan); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9162) 		err = niu_ldg_assign_ldn(np, parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9163) 					 ldg_num_map[ldg_rotor],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9164) 					 LDN_RXDMA(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9165) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9166) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9167) 		ldg_rotor++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9168) 		if (ldg_rotor == np->num_ldg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9169) 			ldg_rotor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9172) 	first_chan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9173) 	for (i = 0; i < port; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9174) 		first_chan += parent->txchan_per_port[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9175) 	num_chan = parent->txchan_per_port[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9176) 	for (i = first_chan; i < (first_chan + num_chan); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9177) 		err = niu_ldg_assign_ldn(np, parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9178) 					 ldg_num_map[ldg_rotor],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9179) 					 LDN_TXDMA(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9180) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9181) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9182) 		ldg_rotor++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9183) 		if (ldg_rotor == np->num_ldg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9184) 			ldg_rotor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9187) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9190) static void niu_ldg_free(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9192) 	if (np->flags & NIU_FLAGS_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9193) 		pci_disable_msix(np->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9196) static int niu_get_of_props(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9198) #ifdef CONFIG_SPARC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9199) 	struct net_device *dev = np->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9200) 	struct device_node *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9201) 	const char *phy_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9202) 	const u8 *mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9203) 	const char *model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9204) 	int prop_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9205) 
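	/* On SPARC the PROM already describes the card: fetch phy-type,
	 * the MAC address and the model string from the OF node instead
	 * of reading VPD/SPROM.
	 */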
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9206) 	if (np->parent->plat_type == PLAT_TYPE_NIU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9207) 		dp = np->op->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9208) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9209) 		dp = pci_device_to_OF_node(np->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9211) 	phy_type = of_get_property(dp, "phy-type", &prop_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9212) 	if (!phy_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9213) 		netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9214) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9217) 	if (!strcmp(phy_type, "none"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9218) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9220) 	strcpy(np->vpd.phy_type, phy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9222) 	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9223) 		netdev_err(dev, "%pOF: Illegal phy string [%s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9224) 			   dp, np->vpd.phy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9225) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9228) 	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9229) 	if (!mac_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9230) 		netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9231) 			   dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9232) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9234) 	if (prop_len != dev->addr_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9235) 		netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9236) 			   dp, prop_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9237) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9238) 	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9239) 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9240) 		netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9241) 		netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9242) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9245) 	model = of_get_property(dp, "model", &prop_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9247) 	if (model)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9248) 		strcpy(np->vpd.model, model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9250) 	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9251) 		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9252) 			NIU_FLAGS_HOTPLUG_PHY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9255) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9256) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9257) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9258) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9261) static int niu_get_invariants(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9263) 	int err, have_props;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9264) 	u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9265) 
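	/* Port identity comes from OF properties when available (SPARC),
	 * otherwise from the on-card VPD, with the SPROM as the last
	 * resort.
	 */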
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9266) 	err = niu_get_of_props(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9267) 	if (err == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9268) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9270) 	have_props = !err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9272) 	err = niu_init_mac_ipp_pcs_base(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9273) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9274) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9276) 	if (have_props) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9277) 		err = niu_get_and_validate_port(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9278) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9279) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9281) 	} else  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9282) 		if (np->parent->plat_type == PLAT_TYPE_NIU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9283) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9284) 
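		/* Enable PIO access through the ESPC (SPROM/EEPROM
		 * controller) only long enough to locate and fetch the
		 * VPD, then shut the window again.
		 */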
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9285) 		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9286) 		offset = niu_pci_vpd_offset(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9287) 		netif_printk(np, probe, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9288) 			     "%s() VPD offset [%08x]\n", __func__, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9289) 		if (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9290) 			err = niu_pci_vpd_fetch(np, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9291) 			if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9292) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9293) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9294) 		nw64(ESPC_PIO_EN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9296) 		if (np->flags & NIU_FLAGS_VPD_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9297) 			niu_pci_vpd_validate(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9298) 			err = niu_get_and_validate_port(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9299) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9300) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9301) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9303) 		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9304) 			err = niu_get_and_validate_port(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9305) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9306) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9307) 			err = niu_pci_probe_sprom(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9308) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9309) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9310) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9313) 	err = niu_probe_ports(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9314) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9315) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9317) 	niu_ldg_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9319) 	niu_classifier_swstate_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9320) 	niu_link_config_init(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9322) 	err = niu_determine_phy_disposition(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9323) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9324) 		err = niu_init_link(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9326) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9328) 
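/* One physical NIU card ("parent") is shared by up to four netdev ports.
 * The list below registers every parent created so far, so a sibling
 * port probing later can find and reuse it.
 */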
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9329) static LIST_HEAD(niu_parent_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9330) static DEFINE_MUTEX(niu_parent_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9331) static int niu_parent_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9333) static ssize_t show_port_phy(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9334) 			     struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9336) 	struct platform_device *plat_dev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9337) 	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9338) 	u32 port_phy = p->port_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9339) 	char *orig_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9340) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9342) 	if (port_phy == PORT_PHY_UNKNOWN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9343) 	    port_phy == PORT_PHY_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9344) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9346) 	for (i = 0; i < p->num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9347) 		const char *type_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9348) 		int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9350) 		type = phy_decode(port_phy, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9351) 		if (type == PORT_TYPE_10G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9352) 			type_str = "10G";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9353) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9354) 			type_str = "1G";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9355) 		buf += sprintf(buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9356) 			       (i == 0) ? "%s" : " %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9357) 			       type_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9359) 	buf += sprintf(buf, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9360) 	return buf - orig_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9363) static ssize_t show_plat_type(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9364) 			      struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9366) 	struct platform_device *plat_dev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9367) 	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9368) 	const char *type_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9370) 	switch (p->plat_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9371) 	case PLAT_TYPE_ATLAS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9372) 		type_str = "atlas";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9373) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9374) 	case PLAT_TYPE_NIU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9375) 		type_str = "niu";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9376) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9377) 	case PLAT_TYPE_VF_P0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9378) 		type_str = "vf_p0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9379) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9380) 	case PLAT_TYPE_VF_P1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9381) 		type_str = "vf_p1";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9382) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9383) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9384) 		type_str = "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9385) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9388) 	return sprintf(buf, "%s\n", type_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9391) static ssize_t __show_chan_per_port(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9392) 				    struct device_attribute *attr, char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9393) 				    int rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9395) 	struct platform_device *plat_dev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9396) 	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9397) 	char *orig_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9398) 	u8 *arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9399) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9401) 	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9403) 	for (i = 0; i < p->num_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9404) 		buf += sprintf(buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9405) 			       (i == 0) ? "%d" : " %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9406) 			       arr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9408) 	buf += sprintf(buf, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9410) 	return buf - orig_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9413) static ssize_t show_rxchan_per_port(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9414) 				    struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9416) 	return __show_chan_per_port(dev, attr, buf, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9419) static ssize_t show_txchan_per_port(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9420) 				    struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9422) 	return __show_chan_per_port(dev, attr, buf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9425) static ssize_t show_num_ports(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9426) 			      struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9428) 	struct platform_device *plat_dev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9429) 	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9431) 	return sprintf(buf, "%d\n", p->num_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9434) static struct device_attribute niu_parent_attributes[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9435) 	__ATTR(port_phy, 0444, show_port_phy, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9436) 	__ATTR(plat_type, 0444, show_plat_type, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9437) 	__ATTR(rxchan_per_port, 0444, show_rxchan_per_port, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9438) 	__ATTR(txchan_per_port, 0444, show_txchan_per_port, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9439) 	__ATTR(num_ports, 0444, show_num_ports, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9440) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9441) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9443) static struct niu_parent *niu_new_parent(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9444) 					 union niu_parent_id *id, u8 ptype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9446) 	struct platform_device *plat_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9447) 	struct niu_parent *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9448) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9449) 
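	/* Expose the shared card as a "niu-board" platform device so the
	 * per-card attributes above appear in sysfs.
	 */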
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9450) 	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9451) 						   NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9452) 	if (IS_ERR(plat_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9453) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9455) 	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9456) 		int err = device_create_file(&plat_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9457) 					     &niu_parent_attributes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9458) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9459) 			goto fail_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9460) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9462) 	p = kzalloc(sizeof(*p), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9463) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9464) 		goto fail_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9466) 	p->index = niu_parent_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9468) 	plat_dev->dev.platform_data = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9469) 	p->plat_dev = plat_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9471) 	memcpy(&p->id, id, sizeof(*id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9472) 	p->plat_type = ptype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9473) 	INIT_LIST_HEAD(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9474) 	atomic_set(&p->refcnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9475) 	list_add(&p->list, &niu_parent_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9476) 	spin_lock_init(&p->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9478) 	p->rxdma_clock_divider = 7500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9480) 	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9481) 	if (p->plat_type == PLAT_TYPE_NIU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9482) 		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9484) 	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9485) 		int index = i - CLASS_CODE_USER_PROG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9487) 		p->tcam_key[index] = TCAM_KEY_TSEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9488) 		p->flow_key[index] = (FLOW_KEY_IPSA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9489) 				      FLOW_KEY_IPDA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9490) 				      FLOW_KEY_PROTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9491) 				      (FLOW_KEY_L4_BYTE12 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9492) 				       FLOW_KEY_L4_0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9493) 				      (FLOW_KEY_L4_BYTE12 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9494) 				       FLOW_KEY_L4_1_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9497) 	for (i = 0; i < LDN_MAX + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9498) 		p->ldg_map[i] = LDG_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9500) 	return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9502) fail_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9503) 	platform_device_unregister(plat_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9504) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9507) static struct niu_parent *niu_get_parent(struct niu *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9508) 					 union niu_parent_id *id, u8 ptype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9510) 	struct niu_parent *p, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9511) 	int port = np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9512) 
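	/* Look for an already-registered parent with a matching id and
	 * fall back to creating one; the sysfs "portN" link doubles as
	 * this port's reference on the parent.
	 */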
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9513) 	mutex_lock(&niu_parent_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9514) 	p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9515) 	list_for_each_entry(tmp, &niu_parent_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9516) 		if (!memcmp(id, &tmp->id, sizeof(*id))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9517) 			p = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9518) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9519) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9520) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9521) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9522) 		p = niu_new_parent(np, id, ptype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9524) 	if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9525) 		char port_name[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9526) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9528) 		sprintf(port_name, "port%d", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9529) 		err = sysfs_create_link(&p->plat_dev->dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9530) 					&np->device->kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9531) 					port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9532) 		if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9533) 			p->ports[port] = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9534) 			atomic_inc(&p->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9535) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9536) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9537) 	mutex_unlock(&niu_parent_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9539) 	return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9542) static void niu_put_parent(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9544) 	struct niu_parent *p = np->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9545) 	u8 port = np->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9546) 	char port_name[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9548) 	BUG_ON(!p || p->ports[port] != np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9550) 	netif_printk(np, probe, KERN_DEBUG, np->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9551) 		     "%s() port[%u]\n", __func__, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9553) 	sprintf(port_name, "port%d", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9555) 	mutex_lock(&niu_parent_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9557) 	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9559) 	p->ports[port] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9560) 	np->parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9562) 	if (atomic_dec_and_test(&p->refcnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9563) 		list_del(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9564) 		platform_device_unregister(p->plat_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9567) 	mutex_unlock(&niu_parent_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9569) 
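/* The niu_ops wrappers below are what lets one core driver serve both
 * the PCI and the N2 back ends: on PCI they reduce to the generic DMA
 * API, with dma_addr_t widened to the u64 handles the core code uses.
 */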
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9570) static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9571) 				    u64 *handle, gfp_t flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9573) 	dma_addr_t dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9574) 	void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9576) 	ret = dma_alloc_coherent(dev, size, &dh, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9577) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9578) 		*handle = dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9579) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9582) static void niu_pci_free_coherent(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9583) 				  void *cpu_addr, u64 handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9585) 	dma_free_coherent(dev, size, cpu_addr, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9588) static u64 niu_pci_map_page(struct device *dev, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9589) 			    unsigned long offset, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9590) 			    enum dma_data_direction direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9592) 	return dma_map_page(dev, page, offset, size, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9595) static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9596) 			       size_t size, enum dma_data_direction direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9598) 	dma_unmap_page(dev, dma_address, size, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9601) static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9602) 			      size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9603) 			      enum dma_data_direction direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9605) 	return dma_map_single(dev, cpu_addr, size, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9608) static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9609) 				 size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9610) 				 enum dma_data_direction direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9612) 	dma_unmap_single(dev, dma_address, size, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9615) static const struct niu_ops niu_pci_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9616) 	.alloc_coherent	= niu_pci_alloc_coherent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9617) 	.free_coherent	= niu_pci_free_coherent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9618) 	.map_page	= niu_pci_map_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9619) 	.unmap_page	= niu_pci_unmap_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9620) 	.map_single	= niu_pci_map_single,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9621) 	.unmap_single	= niu_pci_unmap_single,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9622) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9624) static void niu_driver_version(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9626) 	static int niu_version_printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9628) 	if (niu_version_printed++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9629) 		pr_info("%s", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9631) 
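/*
 * Common allocation path for both back ends: create a multiqueue
 * ethernet device with NIU_NUM_TXCHAN transmit queues and seed the
 * private struct niu with whichever of pdev/op applies, the DMA ops
 * vtable and the port number.  The lock and the reset work item are
 * initialised here; device-specific probe code finishes the setup.
 */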
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9632) static struct net_device *niu_alloc_and_init(struct device *gen_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9633) 					     struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9634) 					     struct platform_device *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9635) 					     const struct niu_ops *ops, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9637) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9638) 	struct niu *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9640) 	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9641) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9642) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9644) 	SET_NETDEV_DEV(dev, gen_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9646) 	np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9647) 	np->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9648) 	np->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9649) 	np->op = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9650) 	np->device = gen_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9651) 	np->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9653) 	np->msg_enable = niu_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9655) 	spin_lock_init(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9656) 	INIT_WORK(&np->reset_task, niu_reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9658) 	np->port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9660) 	return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9662) 
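/*
 * Standard net_device_ops table; niu_assign_netdev_ops() below also
 * wires up the ethtool ops and the TX watchdog timeout, so both probe
 * paths share a single helper.
 */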
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9663) static const struct net_device_ops niu_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9664) 	.ndo_open		= niu_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9665) 	.ndo_stop		= niu_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9666) 	.ndo_start_xmit		= niu_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9667) 	.ndo_get_stats64	= niu_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9668) 	.ndo_set_rx_mode	= niu_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9669) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9670) 	.ndo_set_mac_address	= niu_set_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9671) 	.ndo_do_ioctl		= niu_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9672) 	.ndo_tx_timeout		= niu_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9673) 	.ndo_change_mtu		= niu_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9674) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9676) static void niu_assign_netdev_ops(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9678) 	dev->netdev_ops = &niu_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9679) 	dev->ethtool_ops = &niu_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9680) 	dev->watchdog_timeo = NIU_TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9682) 
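/*
 * Boot-time banner describing how the port was probed.  The ATCA
 * CP3220 platform is special-cased: its transceiver column reports
 * RGMII FIBER vs. SERDES instead of the usual FIBER/SERDES/COPPER
 * split.
 */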
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9683) static void niu_device_announce(struct niu *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9685) 	struct net_device *dev = np->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9687) 	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9689) 	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9690) 		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9691) 				dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9692) 				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9693) 				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9694) 				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9695) 				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9696) 				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9697) 				np->vpd.phy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9698) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9699) 		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9700) 				dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9701) 				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9702) 				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9703) 				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9704) 				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9705) 				  "COPPER")),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9706) 				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9707) 				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9708) 				np->vpd.phy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9711) 
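/*
 * Note the asymmetry: SG, hardware checksumming and RX hashing are
 * exposed in hw_features and therefore user-togglable, while
 * NETIF_F_RXCSUM is set only in features, so receive checksum offload
 * is always on and cannot be disabled through ethtool.
 */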
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9712) static void niu_set_basic_features(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9714) 	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9715) 	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9717) 
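/*
 * PCI probe.  Order of operations: enable the device, sanity-check
 * that BARs 0 and 2 are memory BARs, claim the regions, require a
 * PCIe device, allocate the netdev, obtain the shared parent via
 * niu_get_parent(), tune DEVCTL (error reporting and relaxed ordering
 * on, no-snoop off), negotiate a 44-bit DMA mask with a 32-bit
 * fallback, map BAR 0 and finally register.  Each failure point
 * unwinds through the matching err_out_* label at the bottom.
 */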
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9718) static int niu_pci_init_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9719) 			    const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9721) 	union niu_parent_id parent_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9722) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9723) 	struct niu *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9724) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9725) 	u64 dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9727) 	niu_driver_version();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9729) 	err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9730) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9731) 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9732) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9733) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9735) 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9736) 	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9737) 		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9738) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9739) 		goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9740) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9742) 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9743) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9744) 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9745) 		goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9748) 	if (!pci_is_pcie(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9749) 		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9750) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9751) 		goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9752) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9754) 	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9755) 				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9756) 	if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9757) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9758) 		goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9759) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9760) 	np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9762) 	memset(&parent_id, 0, sizeof(parent_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9763) 	parent_id.pci.domain = pci_domain_nr(pdev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9764) 	parent_id.pci.bus = pdev->bus->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9765) 	parent_id.pci.device = PCI_SLOT(pdev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9767) 	np->parent = niu_get_parent(np, &parent_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9768) 				    PLAT_TYPE_ATLAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9769) 	if (!np->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9770) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9771) 		goto err_out_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9774) 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9775) 		PCI_EXP_DEVCTL_NOSNOOP_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9776) 		PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9777) 		PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9778) 		PCI_EXP_DEVCTL_RELAX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9780) 	dma_mask = DMA_BIT_MASK(44);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9781) 	err = pci_set_dma_mask(pdev, dma_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9782) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9783) 		dev->features |= NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9784) 		err = pci_set_consistent_dma_mask(pdev, dma_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9785) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9786) 			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9787) 			goto err_out_release_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9788) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9790) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9791) 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9792) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9793) 			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9794) 			goto err_out_release_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9795) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9798) 	niu_set_basic_features(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9800) 	dev->priv_flags |= IFF_UNICAST_FLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9802) 	np->regs = pci_ioremap_bar(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9803) 	if (!np->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9804) 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9805) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9806) 		goto err_out_release_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9807) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9809) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9810) 	pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9812) 	dev->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9814) 	/* MTU range: 68 - 9216 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9815) 	dev->min_mtu = ETH_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9816) 	dev->max_mtu = NIU_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9818) 	niu_assign_netdev_ops(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9820) 	err = niu_get_invariants(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9821) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9822) 		if (err != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9823) 			dev_err(&pdev->dev, "Problem fetching chip invariants, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9824) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9827) 	err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9828) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9829) 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9830) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9833) 	pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9835) 	niu_device_announce(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9837) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9839) err_out_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9840) 	if (np->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9841) 		iounmap(np->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9842) 		np->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9845) err_out_release_parent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9846) 	niu_put_parent(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9848) err_out_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9849) 	free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9851) err_out_free_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9852) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9854) err_out_disable_pdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9855) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9857) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9859) 
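/* Teardown mirrors the probe path in reverse order. */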
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9860) static void niu_pci_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9862) 	struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9864) 	if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9865) 		struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9867) 		unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9868) 		if (np->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9869) 			iounmap(np->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9870) 			np->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9871) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9873) 		niu_ldg_free(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9875) 		niu_put_parent(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9877) 		free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9878) 		pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9879) 		pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9882) 
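/*
 * Power management: suspend flushes any pending reset work, stops the
 * interface, kills the periodic timer, then masks interrupts and
 * quiesces the hardware under np->lock before detaching the netdev.
 * Resume re-attaches, re-initialises the hardware and restarts the
 * timer one second out.  Both are no-ops while the interface is down.
 */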
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9883) static int __maybe_unused niu_suspend(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9885) 	struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9886) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9887) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9889) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9890) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9892) 	flush_work(&np->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9893) 	niu_netif_stop(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9895) 	del_timer_sync(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9897) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9898) 	niu_enable_interrupts(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9899) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9901) 	netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9903) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9904) 	niu_stop_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9905) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9907) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9910) static int __maybe_unused niu_resume(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9912) 	struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9913) 	struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9914) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9915) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9917) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9918) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9920) 	netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9922) 	spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9924) 	err = niu_init_hw(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9925) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9926) 		np->timer.expires = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9927) 		add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9928) 		niu_netif_start(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9931) 	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9933) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9936) static SIMPLE_DEV_PM_OPS(niu_pm_ops, niu_suspend, niu_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9938) static struct pci_driver niu_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9939) 	.name		= DRV_MODULE_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9940) 	.id_table	= niu_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9941) 	.probe		= niu_pci_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9942) 	.remove		= niu_pci_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9943) 	.driver.pm	= &niu_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9944) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9945) 
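/*
 * SPARC64-only back end for the on-chip NIU.  These ops bypass the
 * DMA API entirely and hand the hardware raw physical addresses
 * (__pa()/page_to_phys()), which is why the unmap hooks are no-ops.
 */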
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9946) #ifdef CONFIG_SPARC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9947) static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9948) 				     u64 *dma_addr, gfp_t flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9950) 	unsigned long order = get_order(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9951) 	unsigned long page = __get_free_pages(flag, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9953) 	if (page == 0UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9954) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9955) 	memset((char *)page, 0, PAGE_SIZE << order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9956) 	*dma_addr = __pa(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9958) 	return (void *) page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9961) static void niu_phys_free_coherent(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9962) 				   void *cpu_addr, u64 handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9964) 	unsigned long order = get_order(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9966) 	free_pages((unsigned long) cpu_addr, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9969) static u64 niu_phys_map_page(struct device *dev, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9970) 			     unsigned long offset, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9971) 			     enum dma_data_direction direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9973) 	return page_to_phys(page) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9976) static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9977) 				size_t size, enum dma_data_direction direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9979) 	/* Nothing to do: dma_address is already a raw physical address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9982) static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9983) 			       size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9984) 			       enum dma_data_direction direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9986) 	return __pa(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9989) static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9990) 				  size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9991) 				  enum dma_data_direction direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9993) 	/* Nothing to do: dma_address is already a raw physical address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9996) static const struct niu_ops niu_phys_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9997) 	.alloc_coherent	= niu_phys_alloc_coherent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9998) 	.free_coherent	= niu_phys_free_coherent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9999) 	.map_page	= niu_phys_map_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10000) 	.unmap_page	= niu_phys_unmap_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10001) 	.map_single	= niu_phys_map_single,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10002) 	.unmap_single	= niu_phys_unmap_single,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10003) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10004) 
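/*
 * OF platform probe.  The low bit of the first "reg" cell selects the
 * port, resource[1] is the main register window, and resources [2]
 * and [3] are two further register windows the driver labels "vregs".
 * Error unwinding is longer than on PCI because all three mappings go
 * through of_ioremap()/of_iounmap().
 */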
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10005) static int niu_of_probe(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10007) 	union niu_parent_id parent_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10008) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10009) 	struct niu *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10010) 	const u32 *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10011) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10013) 	niu_driver_version();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10015) 	reg = of_get_property(op->dev.of_node, "reg", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10016) 	if (!reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10017) 		dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10018) 			op->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10019) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10020) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10022) 	dev = niu_alloc_and_init(&op->dev, NULL, op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10023) 				 &niu_phys_ops, reg[0] & 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10024) 	if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10025) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10026) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10028) 	np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10030) 	memset(&parent_id, 0, sizeof(parent_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10031) 	parent_id.of = of_get_parent(op->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10033) 	np->parent = niu_get_parent(np, &parent_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10034) 				    PLAT_TYPE_NIU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10035) 	if (!np->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10036) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10037) 		goto err_out_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10038) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10040) 	niu_set_basic_features(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10042) 	np->regs = of_ioremap(&op->resource[1], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10043) 			      resource_size(&op->resource[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10044) 			      "niu regs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10045) 	if (!np->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10046) 		dev_err(&op->dev, "Cannot map device registers, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10047) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10048) 		goto err_out_release_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10049) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10051) 	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10052) 				    resource_size(&op->resource[2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10053) 				    "niu vregs-1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10054) 	if (!np->vir_regs_1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10055) 		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10056) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10057) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10060) 	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10061) 				    resource_size(&op->resource[3]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10062) 				    "niu vregs-2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10063) 	if (!np->vir_regs_2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10064) 		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10065) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10066) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10069) 	niu_assign_netdev_ops(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10071) 	err = niu_get_invariants(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10072) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10073) 		if (err != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10074) 			dev_err(&op->dev, "Problem fetching chip invariants, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10075) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10076) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10078) 	err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10079) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10080) 		dev_err(&op->dev, "Cannot register net device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10081) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10084) 	platform_set_drvdata(op, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10086) 	niu_device_announce(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10088) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10090) err_out_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10091) 	if (np->vir_regs_1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10092) 		of_iounmap(&op->resource[2], np->vir_regs_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10093) 			   resource_size(&op->resource[2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10094) 		np->vir_regs_1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10095) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10097) 	if (np->vir_regs_2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10098) 		of_iounmap(&op->resource[3], np->vir_regs_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10099) 			   resource_size(&op->resource[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10100) 		np->vir_regs_2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10103) 	if (np->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10104) 		of_iounmap(&op->resource[1], np->regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10105) 			   resource_size(&op->resource[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10106) 		np->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10109) err_out_release_parent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10110) 	niu_put_parent(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10112) err_out_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10113) 	free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10115) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10116) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10119) static int niu_of_remove(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10121) 	struct net_device *dev = platform_get_drvdata(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10123) 	if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10124) 		struct niu *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10126) 		unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10128) 		if (np->vir_regs_1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10129) 			of_iounmap(&op->resource[2], np->vir_regs_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10130) 				   resource_size(&op->resource[2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10131) 			np->vir_regs_1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10132) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10134) 		if (np->vir_regs_2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10135) 			of_iounmap(&op->resource[3], np->vir_regs_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10136) 				   resource_size(&op->resource[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10137) 			np->vir_regs_2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10138) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10140) 		if (np->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10141) 			of_iounmap(&op->resource[1], np->regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10142) 				   resource_size(&op->resource[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10143) 			np->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10144) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10146) 		niu_ldg_free(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10148) 		niu_put_parent(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10150) 		free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10151) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10152) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10155) static const struct of_device_id niu_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10156) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10157) 		.name = "network",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10158) 		.compatible = "SUNW,niusl",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10159) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10160) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10161) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10162) MODULE_DEVICE_TABLE(of, niu_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10164) static struct platform_driver niu_of_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10165) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10166) 		.name = "niu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10167) 		.of_match_table = niu_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10168) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10169) 	.probe		= niu_of_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10170) 	.remove		= niu_of_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10171) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10173) #endif /* CONFIG_SPARC64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10174) 
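/*
 * Module init/exit.  On SPARC64 the OF platform driver registers
 * first and is unregistered again if PCI registration fails, so a
 * failed load never leaves half the driver behind.  The BUILD_BUG_ON
 * records the driver's assumption of at least 4KiB pages.
 *
 * Hypothetical load example (the 'debug' module parameter is defined
 * earlier in this file and is fed to netif_msg_init() below; the bit
 * layout is the NETIF_MSG_* mask from <linux/netdevice.h>):
 *
 *	modprobe niu debug=0x7fff
 */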
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10175) static int __init niu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10177) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10179) 	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10181) 	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10183) #ifdef CONFIG_SPARC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10184) 	err = platform_driver_register(&niu_of_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10185) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10187) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10188) 		err = pci_register_driver(&niu_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10189) #ifdef CONFIG_SPARC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10190) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10191) 			platform_driver_unregister(&niu_of_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10192) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10193) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10195) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10198) static void __exit niu_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10200) 	pci_unregister_driver(&niu_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10201) #ifdef CONFIG_SPARC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10202) 	platform_driver_unregister(&niu_of_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10203) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10206) module_init(niu_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10207) module_exit(niu_exit);