Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Broadcom Starfighter2 private context
 *
 * Copyright (C) 2014, Broadcom Corporation
 */

#ifndef __BCM_SF2_H
#define __BCM_SF2_H

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>

#include <net/dsa.h>

#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"

struct bcm_sf2_hw_params {
	u16	top_rev;
	u16	core_rev;
	u16	gphy_rev;
	u32	num_gphy;
	u8	num_acb_queue;
	u8	num_rgmii;
	u8	num_ports;
	u8	fcb_pause_override:1;
	u8	acb_packets_inflight:1;
};

#define BCM_SF2_REGS_NAME {\
	"core", "reg", "intrl2_0", "intrl2_1", "fcb", "acb" \
}

#define BCM_SF2_REGS_NUM	6

struct bcm_sf2_port_status {
	unsigned int link;
	bool enabled;
};

struct bcm_sf2_cfp_priv {
	/* Mutex protecting concurrent accesses to the CFP registers */
	struct mutex lock;
	DECLARE_BITMAP(used, CFP_NUM_RULES);
	DECLARE_BITMAP(unique, CFP_NUM_RULES);
	unsigned int rules_cnt;
	struct list_head rules_list;
};

struct bcm_sf2_priv {
	/* Base registers, keep those in order with BCM_SF2_REGS_NAME */
	void __iomem			*core;
	void __iomem			*reg;
	void __iomem			*intrl2_0;
	void __iomem			*intrl2_1;
	void __iomem			*fcb;
	void __iomem			*acb;

	struct reset_control		*rcdev;

	/* Register offsets indirection tables */
	u32				type;
	const u16			*reg_offsets;
	unsigned int			core_reg_align;
	unsigned int			num_cfp_rules;

	/* spinlock protecting access to the indirect registers */
	spinlock_t			indir_lock;

	int				irq0;
	int				irq1;
	u32				irq0_stat;
	u32				irq0_mask;
	u32				irq1_stat;
	u32				irq1_mask;

	/* Backing b53_device */
	struct b53_device		*dev;

	struct bcm_sf2_hw_params	hw_params;

	struct bcm_sf2_port_status	port_sts[DSA_MAX_PORTS];

	/* Mask of ports enabled for Wake-on-LAN */
	u32				wol_ports_mask;

	struct clk			*clk;
	struct clk			*clk_mdiv;

	/* MoCA port location */
	int				moca_port;

	/* Bitmask of ports having an integrated PHY */
	unsigned int			int_phy_mask;

	/* Master and slave MDIO bus controller */
	unsigned int			indir_phy_mask;
	struct device_node		*master_mii_dn;
	struct mii_bus			*slave_mii_bus;
	struct mii_bus			*master_mii_bus;

	/* Bitmask of ports needing BRCM tags */
	unsigned int			brcm_tag_mask;

	/* CFP rules context */
	struct bcm_sf2_cfp_priv		cfp;
};

static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
{
	struct b53_device *dev = ds->priv;

	return dev->priv;
}

static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off)
{
	return off << priv->core_reg_align;
}
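
/* Illustrative note (added commentary, not part of the original header):
 * core_reg_align is a left shift applied to every CORE register offset, so
 * on a variant with core_reg_align == 1 an offset of 0x04 lands at 0x08
 * within the "core" register window.
 */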

#define SF2_IO_MACRO(name) \
static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off)	\
{									\
	return readl_relaxed(priv->name + off);				\
}									\
static inline void name##_writel(struct bcm_sf2_priv *priv,		\
				  u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->name + off);				\
}									\

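/* Illustrative note (added commentary, not part of the original header):
 * SF2_IO_MACRO(fcb), instantiated further down, expands to a pair of
 * accessors roughly equivalent to:
 *
 *	static inline u32 fcb_readl(struct bcm_sf2_priv *priv, u32 off)
 *	{
 *		return readl_relaxed(priv->fcb + off);
 *	}
 *	static inline void fcb_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
 *	{
 *		writel_relaxed(val, priv->fcb + off);
 *	}
 */
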
/* Accesses to 64-bit registers require us to latch the hi/lo pairs
 * using the REG_DIR_DATA_{READ,WRITE} ancillary registers. The 'indir_lock'
 * spinlock is automatically grabbed and released to provide relative
 * atomicity with latched reads/writes.
 */
#define SF2_IO64_MACRO(name) \
static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off)	\
{									\
	u32 indir, dir;							\
	spin_lock(&priv->indir_lock);					\
	dir = name##_readl(priv, off);					\
	indir = reg_readl(priv, REG_DIR_DATA_READ);			\
	spin_unlock(&priv->indir_lock);					\
	return (u64)indir << 32 | dir;					\
}									\
static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val,	\
							u32 off)	\
{									\
	spin_lock(&priv->indir_lock);					\
	reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE);	\
	name##_writel(priv, lower_32_bits(val), off);			\
	spin_unlock(&priv->indir_lock);					\
}
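
/* Illustrative note (added commentary, not part of the original header):
 * SF2_IO64_MACRO(core) below provides core_readq()/core_writeq(). A 64-bit
 * access would look like:
 *
 *	u64 val = core_readq(priv, some_64bit_reg_off);
 *	core_writeq(priv, val, some_64bit_reg_off);
 *
 * where some_64bit_reg_off is only a placeholder for a real offset from
 * bcm_sf2_regs.h; the indir_lock locking and hi/lo latching are handled
 * inside the generated helpers.
 */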

#define SWITCH_INTR_L2(which)						\
static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

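/* Illustrative note (added commentary, not part of the original header):
 * SWITCH_INTR_L2(0), instantiated further down, generates
 * intrl2_0_mask_clear()/intrl2_0_mask_set(), which update the cached
 * priv->irq0_mask and write the INTRL2_CPU_MASK_CLEAR/INTRL2_CPU_MASK_SET
 * registers, e.g.:
 *
 *	intrl2_0_mask_clear(priv, some_irq_bits);
 *
 * where some_irq_bits is just a placeholder for a mask of INTRL2 bits.
 */
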
static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off)
{
	u32 tmp = bcm_sf2_mangle_addr(priv, off);
	return readl_relaxed(priv->core + tmp);
}

static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
{
	u32 tmp = bcm_sf2_mangle_addr(priv, off);
	writel_relaxed(val, priv->core + tmp);
}

static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off)
{
	return readl_relaxed(priv->reg + priv->reg_offsets[off]);
}

static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off)
{
	writel_relaxed(val, priv->reg + priv->reg_offsets[off]);
}
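
/* Illustrative note (added commentary, not part of the original header):
 * unlike core_readl()/core_writel(), which take a byte offset, the
 * reg_readl()/reg_writel() helpers take an index that is first translated
 * through the per-SoC priv->reg_offsets indirection table before being
 * added to the "reg" window.
 */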

SF2_IO64_MACRO(core);
SF2_IO_MACRO(intrl2_0);
SF2_IO_MACRO(intrl2_1);
SF2_IO_MACRO(fcb);
SF2_IO_MACRO(acb);

SWITCH_INTR_L2(0);
SWITCH_INTR_L2(1);
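
/* Illustrative note (added commentary, not part of the original header):
 * the instantiations above generate core_readq()/core_writeq(),
 * intrl2_0_readl()/intrl2_0_writel(), intrl2_1_readl()/intrl2_1_writel(),
 * fcb_readl()/fcb_writel(), acb_readl()/acb_writel(), plus the
 * intrl2_{0,1}_mask_{set,clear}() interrupt-mask helpers.
 */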

/* RXNFC */
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs);
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc);
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
void bcm_sf2_cfp_exit(struct dsa_switch *ds);
int bcm_sf2_cfp_resume(struct dsa_switch *ds);
void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data);
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data);
int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset);

#endif /* __BCM_SF2_H */