^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Lantiq / Intel GSWIP switch driver for VRX200 SoCs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2010 Lantiq Deutschland
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2012 John Crispin <john@phrozen.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * The VLAN and bridge model the GSWIP hardware uses does not directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * match the model DSA uses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * The hardware has 64 possible table entries for bridges with one VLAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * ID, one flow ID and a list of ports for each bridge. All entries which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * share the same flow ID are combined in the MAC learning table and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * act as one global bridge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * The hardware does not support VLAN filtering per port, only per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * bridge, so this driver converts the DSA model to the hardware model.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * The CPU port receives all exception frames which do not match any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * forwarding rule and is also a member of every bridge. This makes it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * possible to handle all the special cases easily in software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * At initialization the driver allocates one bridge table entry for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * each switch port, which is used whenever the port is not part of an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * explicit bridge. This prevents frames from being forwarded between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * all LAN ports by default.
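 *
 * For example, a standalone user port N ends up in its own bridge
 * table entry at index N + 1, with FID N + 1 and a port map containing
 * only the port itself and the CPU port (see
 * gswip_add_single_port_br() below).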
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/if_bridge.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/if_vlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/mfd/syscon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/of_mdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/of_net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/phylink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <net/dsa.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <dt-bindings/mips/lantiq_rcu_gphy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include "lantiq_pce.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) /* GSWIP MDIO Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define GSWIP_MDIO_GLOB 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define GSWIP_MDIO_GLOB_ENABLE BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define GSWIP_MDIO_CTRL 0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define GSWIP_MDIO_CTRL_BUSY BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define GSWIP_MDIO_CTRL_RD BIT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define GSWIP_MDIO_CTRL_WR BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define GSWIP_MDIO_READ 0x09
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define GSWIP_MDIO_WRITE 0x0A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define GSWIP_MDIO_MDC_CFG0 0x0B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define GSWIP_MDIO_MDC_CFG1 0x0C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define GSWIP_MDIO_PHYp(p) (0x15 - (p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define GSWIP_MDIO_PHY_LINK_MASK 0x6000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define GSWIP_MDIO_PHY_LINK_UP 0x2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define GSWIP_MDIO_PHY_SPEED_M10 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define GSWIP_MDIO_PHY_SPEED_M100 0x0800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define GSWIP_MDIO_PHY_SPEED_G1 0x1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define GSWIP_MDIO_PHY_FDUP_EN 0x0200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) GSWIP_MDIO_PHY_FCONRX_MASK | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) GSWIP_MDIO_PHY_FCONTX_MASK | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) GSWIP_MDIO_PHY_LINK_MASK | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) GSWIP_MDIO_PHY_SPEED_MASK | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) GSWIP_MDIO_PHY_FDUP_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) /* GSWIP MII Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define GSWIP_MII_CFGp(p) (0x2 * (p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define GSWIP_MII_CFG_RESET BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define GSWIP_MII_CFG_EN BIT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define GSWIP_MII_CFG_ISOLATE BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define GSWIP_MII_CFG_LDCLKDIS BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define GSWIP_MII_CFG_RGMII_IBS BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define GSWIP_MII_CFG_RMII_CLK BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #define GSWIP_MII_CFG_MODE_MIIP 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define GSWIP_MII_CFG_MODE_MIIM 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define GSWIP_MII_CFG_MODE_RMIIP 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define GSWIP_MII_CFG_MODE_RMIIM 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define GSWIP_MII_CFG_MODE_RGMII 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define GSWIP_MII_CFG_MODE_MASK 0xf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define GSWIP_MII_CFG_RATE_M2P5 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define GSWIP_MII_CFG_RATE_M25 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define GSWIP_MII_CFG_RATE_M125 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define GSWIP_MII_CFG_RATE_M50 0x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define GSWIP_MII_CFG_RATE_AUTO 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define GSWIP_MII_CFG_RATE_MASK 0x70
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define GSWIP_MII_PCDU0 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define GSWIP_MII_PCDU1 0x03
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define GSWIP_MII_PCDU5 0x05
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /* GSWIP Core Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define GSWIP_SWRES 0x000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define GSWIP_VERSION 0x013
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) #define GSWIP_VERSION_REV_SHIFT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define GSWIP_VERSION_MOD_SHIFT 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define GSWIP_VERSION_2_0 0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define GSWIP_VERSION_2_1 0x021
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define GSWIP_VERSION_2_2 0x122
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define GSWIP_VERSION_2_2_ETC 0x022
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define GSWIP_BM_RAM_ADDR 0x044
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define GSWIP_BM_RAM_CTRL 0x045
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define GSWIP_BM_RAM_CTRL_BAS BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #define GSWIP_BM_QUEUE_GCTRL 0x04A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /* buffer management Port Configuration Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) #define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /* buffer management Port Control Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) #define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) /* PCE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) #define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #define GSWIP_PCE_TBL_MASK 0x448
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) #define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) #define GSWIP_PCE_TBL_ADDR 0x44E
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #define GSWIP_PCE_TBL_CTRL 0x44F
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) #define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) #define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) #define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) #define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) #define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) #define GSWIP_PCE_GCTRL_0 0x456
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) #define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) #define GSWIP_PCE_GCTRL_1 0x457
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) #define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) #define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* MAC address table lock forwarding mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) #define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) #define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) #define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) #define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) #define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) #define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) #define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) #define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) #define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) #define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) #define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) #define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) #define GSWIP_MAC_FLEN 0x8C5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) #define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) #define GSWIP_MAC_CTRL_0_PADEN BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) #define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) #define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) #define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) #define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) #define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) #define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) #define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) #define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) #define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) #define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) #define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) #define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) #define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) #define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) #define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) #define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) /* Ethernet Switch Fetch DMA Port Control Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) #define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) #define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) #define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) #define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) #define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) #define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) #define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) #define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) #define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) /* Ethernet Switch Store DMA Port Control Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) #define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) #define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) #define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) #define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) #define GSWIP_TABLE_ACTIVE_VLAN 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) #define GSWIP_TABLE_VLAN_MAPPING 0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) #define GSWIP_TABLE_MAC_BRIDGE 0x0b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) #define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static, non-aging entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) #define XRX200_GPHY_FW_ALIGN (16 * 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) struct gswip_hw_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) int max_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) int cpu_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) struct xway_gphy_match_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) char *fe_firmware_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) char *ge_firmware_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) struct gswip_gphy_fw {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) struct clk *clk_gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) struct reset_control *reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) u32 fw_addr_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) char *fw_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) struct gswip_vlan {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) struct net_device *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) u16 vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) u8 fid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct gswip_priv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) __iomem void *gswip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) __iomem void *mdio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) __iomem void *mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) const struct gswip_hw_info *hw_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) const struct xway_gphy_match_data *gphy_fw_name_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) struct dsa_switch *ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) struct regmap *rcu_regmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) struct gswip_vlan vlans[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) int num_gphy_fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) struct gswip_gphy_fw *gphy_fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) u32 port_vlan_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) struct gswip_pce_table_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) u16 table; // PCE_TBL_CTRL.ADDR = pData->table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) u16 key[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) u16 val[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) u16 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) u8 gmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) bool type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) bool valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) bool key_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) struct gswip_rmon_cnt_desc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) #define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /** Receive Packet Count (only packets that are accepted and not discarded). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) MIB_DESC(1, 0x1F, "RxGoodPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) MIB_DESC(1, 0x23, "RxUnicastPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) MIB_DESC(1, 0x22, "RxMulticastPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) MIB_DESC(1, 0x20, "RxGoodPausePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) MIB_DESC(1, 0x12, "Rx64BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) MIB_DESC(1, 0x13, "Rx127BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) MIB_DESC(1, 0x14, "Rx255BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) MIB_DESC(1, 0x15, "Rx511BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) MIB_DESC(1, 0x16, "Rx1023BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) MIB_DESC(1, 0x17, "RxMaxBytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) MIB_DESC(1, 0x18, "RxDroppedPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) MIB_DESC(1, 0x19, "RxFilteredPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) MIB_DESC(2, 0x24, "RxGoodBytes"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) MIB_DESC(2, 0x26, "RxBadBytes"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) MIB_DESC(1, 0x0C, "TxGoodPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) MIB_DESC(1, 0x06, "TxUnicastPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) MIB_DESC(1, 0x07, "TxMulticastPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) MIB_DESC(1, 0x00, "Tx64BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) MIB_DESC(1, 0x01, "Tx127BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) MIB_DESC(1, 0x02, "Tx255BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) MIB_DESC(1, 0x03, "Tx511BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) MIB_DESC(1, 0x04, "Tx1023BytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) MIB_DESC(1, 0x05, "TxMaxBytePkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) MIB_DESC(1, 0x08, "TxSingleCollCount"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) MIB_DESC(1, 0x09, "TxMultCollCount"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) MIB_DESC(1, 0x0A, "TxLateCollCount"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) MIB_DESC(1, 0x0B, "TxExcessCollCount"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) MIB_DESC(1, 0x0D, "TxPauseCount"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) MIB_DESC(1, 0x10, "TxDroppedPkts"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) MIB_DESC(2, 0x0E, "TxGoodBytes"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
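/* Register offsets in this driver are given in units of 32-bit words,
 * so the accessors below multiply by 4 to form the byte offset into
 * the respective MMIO region.
 */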
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) return __raw_readl(priv->gswip + (offset * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) __raw_writel(val, priv->gswip + (offset * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) u32 val = gswip_switch_r(priv, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) val &= ~(clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) val |= set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) gswip_switch_w(priv, val, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) static int gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) u32 cleared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) (val & cleared) == 0, 20, 50000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) return __raw_readl(priv->mdio + (offset * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) __raw_writel(val, priv->mdio + (offset * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) u32 val = gswip_mdio_r(priv, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) val &= ~(clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) val |= set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) gswip_mdio_w(priv, val, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) return __raw_readl(priv->mii + (offset * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) __raw_writel(val, priv->mii + (offset * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) u32 val = gswip_mii_r(priv, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) val &= ~(clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) val |= set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) gswip_mii_w(priv, val, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) /* There's no MII_CFG register for the CPU port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) if (!dsa_is_cpu_port(priv->ds, port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
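/* The per-port PCDU (TX/RX delay) registers only exist for ports 0, 1
 * and 5; updates for any other port are silently ignored.
 */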
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) switch (port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
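/* Wait for the MDIO controller to become idle by polling the BUSY bit,
 * giving up after roughly 100 iterations of 20-40 us each.
 */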
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) static int gswip_mdio_poll(struct gswip_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) int cnt = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) while (likely(cnt--)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) usleep_range(20, 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
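/* MDIO bus accessors for the internal bus: both wait for the
 * controller to go idle before starting a transaction, and the read
 * additionally waits for completion before fetching the result.
 */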
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) struct gswip_priv *priv = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) err = gswip_mdio_poll(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) GSWIP_MDIO_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) struct gswip_priv *priv = bus->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) err = gswip_mdio_poll(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) GSWIP_MDIO_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) err = gswip_mdio_poll(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return gswip_mdio_r(priv, GSWIP_MDIO_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
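/* Allocate and register the switch-internal MDIO bus. It is exposed to
 * DSA as the slave MII bus and populated from the device tree node
 * passed in as @mdio_np.
 */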
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) struct dsa_switch *ds = priv->ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) ds->slave_mii_bus = mdiobus_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (!ds->slave_mii_bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) ds->slave_mii_bus->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) ds->slave_mii_bus->read = gswip_mdio_rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) ds->slave_mii_bus->write = gswip_mdio_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) dev_name(priv->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) ds->slave_mii_bus->parent = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) mdiobus_free(ds->slave_mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
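/* Read one entry from a PCE table through the indirect register
 * interface: wait for a previous access (BAS) to finish, program the
 * entry index, table and operation mode, trigger the access and, once
 * it completes, copy the key/value/mask words and control flags back
 * into @tbl.
 */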
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) static int gswip_pce_table_entry_read(struct gswip_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct gswip_pce_table_entry *tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) u16 crtl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) GSWIP_PCE_TBL_CTRL_BAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) GSWIP_PCE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) GSWIP_PCE_TBL_CTRL_BAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
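/* Write one entry to a PCE table: program the entry index, table and
 * operation mode, load the key/value/mask words, then set the
 * valid/type/group map flags and start the access with BAS, waiting
 * for the hardware to complete it.
 */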
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) static int gswip_pce_table_entry_write(struct gswip_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) struct gswip_pce_table_entry *tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) u16 crtl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) GSWIP_PCE_TBL_CTRL_BAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) tbl->table | addr_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) GSWIP_PCE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) tbl->table | addr_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) GSWIP_PCE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) GSWIP_PCE_TBL_CTRL_GMAP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) if (tbl->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (tbl->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) crtl |= GSWIP_PCE_TBL_CTRL_VLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) crtl |= GSWIP_PCE_TBL_CTRL_BAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) GSWIP_PCE_TBL_CTRL_BAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) /* Add the LAN port into a bridge with the CPU port by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) * default. This prevents automatic forwarding of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) * packets between the LAN ports when no explicit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) * bridge is configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) struct gswip_pce_table_entry vlan_active = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) struct gswip_pce_table_entry vlan_mapping = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) unsigned int cpu_port = priv->hw_info->cpu_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) unsigned int max_ports = priv->hw_info->max_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (port >= max_ports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) dev_err(priv->dev, "single port bridge for port %i not supported\n", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) vlan_active.index = port + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) vlan_active.key[0] = 0; /* vid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) vlan_active.val[0] = port + 1 /* fid */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) vlan_active.valid = add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) err = gswip_pce_table_entry_write(priv, &vlan_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) if (!add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) vlan_mapping.index = port + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) vlan_mapping.val[0] = 0 /* vid */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) vlan_mapping.val[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) err = gswip_pce_table_entry_write(priv, &vlan_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
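/* Bring up a user port: (re)create its single-port bridge entry,
 * enable the RMON counters and the fetch/store DMA for the port, and
 * program the address of the attached PHY into the port's MDIO PHY
 * register.
 */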
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) static int gswip_port_enable(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct phy_device *phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (!dsa_is_user_port(ds, port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (!dsa_is_cpu_port(ds, port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) err = gswip_add_single_port_br(priv, port, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) /* RMON Counter Enable for port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) /* enable port fetch/store dma & VLAN Modification */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) GSWIP_FDMA_PCTRLp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) GSWIP_SDMA_PCTRLp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (!dsa_is_cpu_port(ds, port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) u32 mdio_phy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (phydev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) GSWIP_MDIO_PHYp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
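/* Stop a user port by disabling its fetch and store DMA. */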
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) static void gswip_port_disable(struct dsa_switch *ds, int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) if (!dsa_is_user_port(ds, port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) GSWIP_FDMA_PCTRLp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) GSWIP_SDMA_PCTRLp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
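/* Download the PCE microcode from lantiq_pce.h into the parser table
 * one entry at a time and then flag it as valid via
 * GSWIP_PCE_GCTRL_0_MC_VALID.
 */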
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) static int gswip_pce_load_microcode(struct gswip_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) GSWIP_PCE_TBL_VAL(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) GSWIP_PCE_TBL_VAL(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) GSWIP_PCE_TBL_VAL(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) GSWIP_PCE_TBL_VAL(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* start the table access: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) GSWIP_PCE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) GSWIP_PCE_TBL_CTRL_BAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) /* tell the switch that the microcode is loaded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) GSWIP_PCE_GCTRL_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
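/* Toggle VLAN awareness on a port. With filtering enabled the unknown
 * VLAN rule and the ingress/egress member violation rules are set and
 * transparent VLAN mode is cleared; with filtering disabled the port
 * falls back to transparent VLAN mode with only the VLAN security rule
 * set. The filtering state cannot be changed while the port is part of
 * a bridge.
 */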
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) bool vlan_filtering,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct switchdev_trans *trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /* Do not allow changing the VLAN filtering options while in bridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (switchdev_trans_ph_prepare(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (vlan_filtering) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* Use port based VLAN tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) gswip_switch_mask(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) GSWIP_PCE_VCTRL_VSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) GSWIP_PCE_VCTRL_VEMR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) GSWIP_PCE_VCTRL(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) GSWIP_PCE_PCTRL_0p(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /* Use port based VLAN tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) gswip_switch_mask(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) GSWIP_PCE_VCTRL_VEMR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) GSWIP_PCE_VCTRL_VSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) GSWIP_PCE_VCTRL(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) GSWIP_PCE_PCTRL_0p(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static int gswip_setup(struct dsa_switch *ds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) unsigned int cpu_port = priv->hw_info->cpu_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) usleep_range(5000, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) gswip_switch_w(priv, 0, GSWIP_SWRES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /* disable port fetch/store dma on all ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) for (i = 0; i < priv->hw_info->max_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct switchdev_trans trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* Skip the prepare phase, this shouldn't return an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * during setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) trans.ph_prepare = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) gswip_port_disable(ds, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) gswip_port_vlan_filtering(ds, i, false, &trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /* enable Switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) err = gswip_pce_load_microcode(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) dev_err(priv->dev, "writing PCE microcode failed, %i", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /* Default unknown Broadcast/Multicast/Unicast port maps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * interoperability problem with this auto polling mechanism because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * their status registers think that the link is in a different state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * auto polling state machine consider the link being negotiated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * to the switch port being completely dead (RX and TX are both not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * working).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * it would work fine for a few minutes to hours and then stop, on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * other device it would no traffic could be sent or received at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * Testing shows that when PHY auto polling is disabled these problems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * go away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /* Configure the MDIO Clock 2.5 MHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /* Disable the xMII interface and clear it's isolation bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) for (i = 0; i < priv->hw_info->max_ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) gswip_mii_mask_cfg(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 0, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* enable special tag insertion on cpu port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) GSWIP_FDMA_PCTRLp(cpu_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* accept special tag in ingress direction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) GSWIP_PCE_PCTRL_0p(cpu_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) GSWIP_MAC_CTRL_2p(cpu_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) GSWIP_MAC_FLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) GSWIP_BM_QUEUE_GCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* VLAN aware Switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* Flush MAC Table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) GSWIP_PCE_GCTRL_0_MTFL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) dev_err(priv->dev, "MAC flushing didn't finish\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) gswip_port_enable(ds, cpu_port, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) enum dsa_tag_protocol mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return DSA_TAG_PROTO_GSWIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static int gswip_vlan_active_create(struct gswip_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct net_device *bridge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) int fid, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct gswip_pce_table_entry vlan_active = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) unsigned int max_ports = priv->hw_info->max_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /* Look for a free slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (!priv->vlans[i].bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (idx == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (fid == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) fid = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) vlan_active.index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) vlan_active.key[0] = vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) vlan_active.val[0] = fid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) vlan_active.valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) err = gswip_pce_table_entry_write(priv, &vlan_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) priv->vlans[idx].bridge = bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) priv->vlans[idx].vid = vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) priv->vlans[idx].fid = fid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct gswip_pce_table_entry vlan_active = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) vlan_active.index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) vlan_active.valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) err = gswip_pce_table_entry_write(priv, &vlan_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) priv->vlans[idx].bridge = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static int gswip_vlan_add_unaware(struct gswip_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct net_device *bridge, int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct gswip_pce_table_entry vlan_mapping = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) unsigned int max_ports = priv->hw_info->max_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) unsigned int cpu_port = priv->hw_info->cpu_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) bool active_vlan_created = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) int idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* Check if there is already a page for this bridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (priv->vlans[i].bridge == bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* If this bridge is not programmed yet, add a Active VLAN table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * entry in a free slot and prepare the VLAN mapping table entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (idx == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) idx = gswip_vlan_active_create(priv, bridge, -1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) active_vlan_created = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) vlan_mapping.index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /* VLAN ID byte, maps to the VLAN ID of vlan active table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) vlan_mapping.val[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /* Read the existing VLAN mapping entry from the switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) vlan_mapping.index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) err = gswip_pce_table_entry_read(priv, &vlan_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* Update the VLAN mapping entry and write it to the switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) vlan_mapping.val[1] |= BIT(cpu_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) vlan_mapping.val[1] |= BIT(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) err = gswip_pce_table_entry_write(priv, &vlan_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /* In case an Active VLAN was creaetd delete it again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (active_vlan_created)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) gswip_vlan_active_remove(priv, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static int gswip_vlan_add_aware(struct gswip_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct net_device *bridge, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) u16 vid, bool untagged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) bool pvid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct gswip_pce_table_entry vlan_mapping = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) unsigned int max_ports = priv->hw_info->max_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) unsigned int cpu_port = priv->hw_info->cpu_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) bool active_vlan_created = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) int idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int fid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* Check if there is already a page for this bridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (priv->vlans[i].bridge == bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (fid != -1 && fid != priv->vlans[i].fid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) dev_err(priv->dev, "one bridge with multiple flow ids\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) fid = priv->vlans[i].fid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (priv->vlans[i].vid == vid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /* If this bridge is not programmed yet, add a Active VLAN table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * entry in a free slot and prepare the VLAN mapping table entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (idx == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) idx = gswip_vlan_active_create(priv, bridge, fid, vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) active_vlan_created = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) vlan_mapping.index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* VLAN ID byte, maps to the VLAN ID of vlan active table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) vlan_mapping.val[0] = vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* Read the existing VLAN mapping entry from the switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) vlan_mapping.index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) err = gswip_pce_table_entry_read(priv, &vlan_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) vlan_mapping.val[0] = vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* Update the VLAN mapping entry and write it to the switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) vlan_mapping.val[1] |= BIT(cpu_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) vlan_mapping.val[2] |= BIT(cpu_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) vlan_mapping.val[1] |= BIT(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (untagged)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) vlan_mapping.val[2] &= ~BIT(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) vlan_mapping.val[2] |= BIT(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) err = gswip_pce_table_entry_write(priv, &vlan_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* In case an Active VLAN was creaetd delete it again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (active_vlan_created)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) gswip_vlan_active_remove(priv, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (pvid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static int gswip_vlan_remove(struct gswip_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct net_device *bridge, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) u16 vid, bool pvid, bool vlan_aware)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct gswip_pce_table_entry vlan_mapping = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) unsigned int max_ports = priv->hw_info->max_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) unsigned int cpu_port = priv->hw_info->cpu_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /* Check if there is already a page for this bridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (priv->vlans[i].bridge == bridge &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) (!vlan_aware || priv->vlans[i].vid == vid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (idx == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) dev_err(priv->dev, "bridge to leave does not exists\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) vlan_mapping.index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) err = gswip_pce_table_entry_read(priv, &vlan_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) vlan_mapping.val[1] &= ~BIT(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) vlan_mapping.val[2] &= ~BIT(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) err = gswip_pce_table_entry_write(priv, &vlan_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* In case all ports are removed from the bridge, remove the VLAN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) err = gswip_vlan_active_remove(priv, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dev_err(priv->dev, "failed to write active VLAN: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* GSWIP 2.2 (GRX300) and later program here the VID directly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (pvid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct net_device *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* When the bridge uses VLAN filtering we have to configure VLAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * specific bridges. No bridge is configured here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (!br_vlan_enabled(bridge)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) err = gswip_vlan_add_unaware(priv, bridge, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) priv->port_vlan_filter &= ~BIT(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) priv->port_vlan_filter |= BIT(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return gswip_add_single_port_br(priv, port, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct net_device *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) gswip_add_single_port_br(priv, port, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* When the bridge uses VLAN filtering we have to configure VLAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * specific bridges. No bridge is configured here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (!br_vlan_enabled(bridge))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) gswip_vlan_remove(priv, bridge, port, 0, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) const struct switchdev_obj_port_vlan *vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) unsigned int max_ports = priv->hw_info->max_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) u16 vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) int pos = max_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) /* We only support VLAN filtering on bridges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (!dsa_is_cpu_port(ds, port) && !bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) int idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* Check if there is already a page for this VLAN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (priv->vlans[i].bridge == bridge &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) priv->vlans[i].vid == vid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /* If this VLAN is not programmed yet, we have to reserve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * one entry in the VLAN table. Make sure we start at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * next position round.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (idx == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /* Look for a free slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (!priv->vlans[pos].bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) idx = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) pos++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (idx == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) const struct switchdev_obj_port_vlan *vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) u16 vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* We have to receive all packets on the CPU port and should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * do any VLAN filtering here. This is also called with bridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * NULL and then we do not know for which bridge to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (dsa_is_cpu_port(ds, port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) const struct switchdev_obj_port_vlan *vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) u16 vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /* We have to receive all packets on the CPU port and should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * do any VLAN filtering here. This is also called with bridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * NULL and then we do not know for which bridge to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (dsa_is_cpu_port(ds, port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static void gswip_port_fast_age(struct dsa_switch *ds, int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct gswip_pce_table_entry mac_bridge = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
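/* Walk all 2048 MAC bridge table entries and invalidate the
* dynamically learned ones that belong to this port.
*/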
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) for (i = 0; i < 2048; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) mac_bridge.index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) err = gswip_pce_table_entry_read(priv, &mac_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) dev_err(priv->dev, "failed to read mac bridge: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (!mac_bridge.valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) mac_bridge.valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) err = gswip_pce_table_entry_write(priv, &mac_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) dev_err(priv->dev, "failed to write mac bridge: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) u32 stp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) case BR_STATE_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) GSWIP_SDMA_PCTRLp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) case BR_STATE_BLOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) case BR_STATE_LISTENING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) case BR_STATE_LEARNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) case BR_STATE_FORWARDING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) dev_err(priv->dev, "invalid STP state: %d\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) GSWIP_SDMA_PCTRLp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) GSWIP_PCE_PCTRL_0p(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static int gswip_port_fdb(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) const unsigned char *addr, u16 vid, bool add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) struct gswip_pce_table_entry mac_bridge = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) unsigned int cpu_port = priv->hw_info->cpu_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int fid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (priv->vlans[i].bridge == bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) fid = priv->vlans[i].fid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (fid == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) dev_err(priv->dev, "Port not part of a bridge\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) mac_bridge.key_mode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) mac_bridge.key[0] = addr[5] | (addr[4] << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) mac_bridge.key[1] = addr[3] | (addr[2] << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) mac_bridge.key[2] = addr[1] | (addr[0] << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) mac_bridge.key[3] = fid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) mac_bridge.valid = add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) err = gswip_pce_table_entry_write(priv, &mac_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) const unsigned char *addr, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return gswip_port_fdb(ds, port, addr, vid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) const unsigned char *addr, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return gswip_port_fdb(ds, port, addr, vid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) dsa_fdb_dump_cb_t *cb, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct gswip_pce_table_entry mac_bridge = {0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) unsigned char addr[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
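/* Walk all 2048 MAC bridge table entries. Static entries carry a
* port map in val[0], dynamically learned entries store the source
* port in bits 7:4 of val[0].
*/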
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) for (i = 0; i < 2048; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) mac_bridge.index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) err = gswip_pce_table_entry_read(priv, &mac_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) dev_err(priv->dev, "failed to write mac bridge: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (!mac_bridge.valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) addr[5] = mac_bridge.key[0] & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) addr[3] = mac_bridge.key[1] & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) addr[1] = mac_bridge.key[2] & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (mac_bridge.val[0] & BIT(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) err = cb(addr, 0, true, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) err = cb(addr, 0, false, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) static void gswip_phylink_validate(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) unsigned long *supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct phylink_link_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) switch (port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (!phy_interface_mode_is_rgmii(state->interface) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) state->interface != PHY_INTERFACE_MODE_MII &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) state->interface != PHY_INTERFACE_MODE_REVMII &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) state->interface != PHY_INTERFACE_MODE_RMII)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) goto unsupported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) goto unsupported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (!phy_interface_mode_is_rgmii(state->interface) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) state->interface != PHY_INTERFACE_MODE_INTERNAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) goto unsupported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) dev_err(ds->dev, "Unsupported port: %i\n", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) /* Allow all the expected bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) phylink_set(mask, Autoneg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) phylink_set_port_modes(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) phylink_set(mask, Pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) phylink_set(mask, Asym_Pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) /* With the exclusion of MII, Reverse MII and Reduced MII, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) * support Gigabit, including Half duplex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (state->interface != PHY_INTERFACE_MODE_MII &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) state->interface != PHY_INTERFACE_MODE_REVMII &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) state->interface != PHY_INTERFACE_MODE_RMII) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) phylink_set(mask, 1000baseT_Full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) phylink_set(mask, 1000baseT_Half);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) phylink_set(mask, 10baseT_Half);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) phylink_set(mask, 10baseT_Full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) phylink_set(mask, 100baseT_Half);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) phylink_set(mask, 100baseT_Full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) bitmap_and(supported, supported, mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) __ETHTOOL_LINK_MODE_MASK_NBITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) bitmap_and(state->advertising, state->advertising, mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) __ETHTOOL_LINK_MODE_MASK_NBITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) unsupported:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) phy_modes(state->interface), port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) u32 mdio_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) GSWIP_MDIO_PHYp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) phy_interface_t interface)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
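/* Select the matching PHY status override speed, xMII clock rate and
* MAC interface mode. In RMII mode the 50 MHz clock
* (GSWIP_MII_CFG_RATE_M50) is used for both 10 and 100 Mbit/s.
*/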
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) switch (speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) case SPEED_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (interface == PHY_INTERFACE_MODE_RMII)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) mii_cfg = GSWIP_MII_CFG_RATE_M50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) case SPEED_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (interface == PHY_INTERFACE_MODE_RMII)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) mii_cfg = GSWIP_MII_CFG_RATE_M50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) mii_cfg = GSWIP_MII_CFG_RATE_M25;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) case SPEED_1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) mii_cfg = GSWIP_MII_CFG_RATE_M125;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) GSWIP_MDIO_PHYp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) GSWIP_MAC_CTRL_0p(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
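/* Apply the duplex mode to both the MAC control and the MDIO PHY register. */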
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) u32 mac_ctrl_0, mdio_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (duplex == DUPLEX_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) GSWIP_MAC_CTRL_0p(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) GSWIP_MDIO_PHYp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
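/* Configure pause frame (flow control) handling for the port in the MAC
 * control and MDIO PHY registers.
 */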
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static void gswip_port_set_pause(struct gswip_priv *priv, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) bool tx_pause, bool rx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) u32 mac_ctrl_0, mdio_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (tx_pause && rx_pause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) GSWIP_MDIO_PHY_FCONRX_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) } else if (tx_pause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) GSWIP_MDIO_PHY_FCONRX_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) } else if (rx_pause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) GSWIP_MDIO_PHY_FCONRX_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) GSWIP_MDIO_PHY_FCONRX_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) gswip_mdio_mask(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) GSWIP_MDIO_PHY_FCONTX_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) GSWIP_MDIO_PHY_FCONRX_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) mdio_phy, GSWIP_MDIO_PHYp(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
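/* Select the xMII mode matching the PHY interface mode and, for the RGMII
 * modes where the PHY adds the delays, clear the MAC-side clock delays.
 */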
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) unsigned int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) const struct phylink_link_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) u32 miicfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) miicfg |= GSWIP_MII_CFG_LDCLKDIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) switch (state->interface) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) case PHY_INTERFACE_MODE_MII:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) case PHY_INTERFACE_MODE_INTERNAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) miicfg |= GSWIP_MII_CFG_MODE_MIIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) case PHY_INTERFACE_MODE_REVMII:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) miicfg |= GSWIP_MII_CFG_MODE_MIIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) case PHY_INTERFACE_MODE_RMII:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) /* Configure the RMII clock as output: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) miicfg |= GSWIP_MII_CFG_RMII_CLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) case PHY_INTERFACE_MODE_RGMII:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) case PHY_INTERFACE_MODE_RGMII_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) case PHY_INTERFACE_MODE_RGMII_RXID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) case PHY_INTERFACE_MODE_RGMII_TXID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) miicfg |= GSWIP_MII_CFG_MODE_RGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) dev_err(ds->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) "Unsupported interface: %d\n", state->interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) gswip_mii_mask_cfg(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) miicfg, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) switch (state->interface) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) case PHY_INTERFACE_MODE_RGMII_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) case PHY_INTERFACE_MODE_RGMII_RXID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) case PHY_INTERFACE_MODE_RGMII_TXID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) unsigned int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) phy_interface_t interface)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (!dsa_is_cpu_port(ds, port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) gswip_port_set_link(priv, port, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) unsigned int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) phy_interface_t interface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) struct phy_device *phydev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) int speed, int duplex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) bool tx_pause, bool rx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (!dsa_is_cpu_port(ds, port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) gswip_port_set_link(priv, port, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) gswip_port_set_speed(priv, port, speed, interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) gswip_port_set_duplex(priv, port, duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) gswip_port_set_pause(priv, port, tx_pause, rx_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) uint8_t *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (stringset != ETH_SS_STATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) ETH_GSTRING_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
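/* Read one 32-bit counter from the BM RAM table: trigger a read access for
 * the given table and index and combine the two 16-bit result registers.
 * Returns 0 if the read times out.
 */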
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) u32 result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) GSWIP_BM_RAM_CTRL_OPMOD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) table | GSWIP_BM_RAM_CTRL_BAS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) GSWIP_BM_RAM_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) GSWIP_BM_RAM_CTRL_BAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) table, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) uint64_t *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) struct gswip_priv *priv = ds->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) const struct gswip_rmon_cnt_desc *rmon_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) u64 high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) rmon_cnt = &gswip_rmon_cnt[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) data[i] = gswip_bcm_ram_entry_read(priv, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) rmon_cnt->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (rmon_cnt->size == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) high = gswip_bcm_ram_entry_read(priv, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) rmon_cnt->offset + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) data[i] |= high << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (sset != ETH_SS_STATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) return ARRAY_SIZE(gswip_rmon_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) static const struct dsa_switch_ops gswip_switch_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) .get_tag_protocol = gswip_get_tag_protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) .setup = gswip_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) .port_enable = gswip_port_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) .port_disable = gswip_port_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) .port_bridge_join = gswip_port_bridge_join,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) .port_bridge_leave = gswip_port_bridge_leave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) .port_fast_age = gswip_port_fast_age,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) .port_vlan_filtering = gswip_port_vlan_filtering,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) .port_vlan_prepare = gswip_port_vlan_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) .port_vlan_add = gswip_port_vlan_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) .port_vlan_del = gswip_port_vlan_del,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) .port_stp_state_set = gswip_port_stp_state_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) .port_fdb_add = gswip_port_fdb_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) .port_fdb_del = gswip_port_fdb_del,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) .port_fdb_dump = gswip_port_fdb_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) .phylink_validate = gswip_phylink_validate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) .phylink_mac_config = gswip_phylink_mac_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) .phylink_mac_link_down = gswip_phylink_mac_link_down,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) .phylink_mac_link_up = gswip_phylink_mac_link_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) .get_strings = gswip_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) .get_ethtool_stats = gswip_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) .get_sset_count = gswip_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) static const struct xway_gphy_match_data xrx300_gphy_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) static const struct of_device_id xway_gphy_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
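/* Load the GPHY firmware: copy it into a suitably aligned DMA area, write
 * its address into the RCU register for this GPHY and take the GPHY core
 * out of reset so it can boot from the firmware.
 */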
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) struct device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) const struct firmware *fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) void *fw_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) dma_addr_t dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) ret = clk_prepare_enable(gphy_fw->clk_gate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) reset_control_assert(gphy_fw->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) ret = request_firmware(&fw, gphy_fw->fw_name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) dev_err(dev, "failed to load firmware: %s, error: %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) gphy_fw->fw_name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /* GPHY cores need the firmware code in a persistent and contiguous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) * memory area with a 16 kB boundary aligned start address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) size = fw->size + XRX200_GPHY_FW_ALIGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (fw_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) memcpy(fw_addr, fw->data, fw->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) dev_err(dev, "failed to alloc firmware memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) reset_control_deassert(gphy_fw->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
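/* Set up one GPHY core described by a child node of the gphy-fw node:
 * look up its gate clock, firmware address offset, mode (FE/GE) and reset
 * line, then load the matching firmware.
 */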
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) static int gswip_gphy_fw_probe(struct gswip_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) struct gswip_gphy_fw *gphy_fw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) struct device_node *gphy_fw_np, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) struct device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) u32 gphy_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) char gphyname[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (IS_ERR(gphy_fw->clk_gate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) dev_err(dev, "Failed to lookup gate clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) return PTR_ERR(gphy_fw->clk_gate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) /* Default to GE mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) gphy_mode = GPHY_MODE_GE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) switch (gphy_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) case GPHY_MODE_FE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) case GPHY_MODE_GE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (IS_ERR(gphy_fw->reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) dev_err(dev, "Failed to lookup gphy reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) return PTR_ERR(gphy_fw->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) return gswip_gphy_fw_load(priv, gphy_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) static void gswip_gphy_fw_remove(struct gswip_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) struct gswip_gphy_fw *gphy_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) /* check if the device was fully probed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (!gphy_fw->fw_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) dev_err(priv->dev, "cannot reset GPHY FW pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) clk_disable_unprepare(gphy_fw->clk_gate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) reset_control_put(gphy_fw->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
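/* Walk the gphy-fw list node: pick the firmware variant matching the
 * GSWIP version (or the compatible string), then probe and load the
 * firmware for each available GPHY child node.
 */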
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) static int gswip_gphy_fw_list(struct gswip_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct device_node *gphy_fw_list_np, u32 version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) struct device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct device_node *gphy_fw_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) /* VRX200 rev 1.1 uses GSWIP 2.0 and needs the older GPHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * firmware, while VRX200 rev 1.2 uses GSWIP 2.1 and needs a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * different GPHY firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) switch (version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) case GSWIP_VERSION_2_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) case GSWIP_VERSION_2_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) dev_err(dev, "unknown GSWIP version: 0x%x\n", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) match = of_match_node(xway_gphy_match, gphy_fw_list_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (match && match->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) priv->gphy_fw_name_cfg = match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (!priv->gphy_fw_name_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) dev_err(dev, "GPHY compatible type not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (!priv->num_gphy_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) "lantiq,rcu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (IS_ERR(priv->rcu_regmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) return PTR_ERR(priv->rcu_regmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) sizeof(*priv->gphy_fw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (!priv->gphy_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) gphy_fw_np, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) goto remove_gphy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /* The standalone PHY11G requires 300ms to be fully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * initialized and ready for any MDIO communication after being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * taken out of reset. For the SoC-internal GPHY variant there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * is no (known) documentation for the minimum time after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * reset. Use the same value as for the standalone variant as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * some users have reported internal PHYs not being detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * without any delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) msleep(300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) remove_gphy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) for (i = 0; i < priv->num_gphy_fw; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) static int gswip_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) struct gswip_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) struct device_node *mdio_np, *gphy_fw_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) u32 version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) priv->gswip = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (IS_ERR(priv->gswip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) return PTR_ERR(priv->gswip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) priv->mdio = devm_platform_ioremap_resource(pdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (IS_ERR(priv->mdio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return PTR_ERR(priv->mdio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) priv->mii = devm_platform_ioremap_resource(pdev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (IS_ERR(priv->mii))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) return PTR_ERR(priv->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) priv->hw_info = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (!priv->hw_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (!priv->ds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) priv->ds->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) priv->ds->num_ports = priv->hw_info->max_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) priv->ds->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) priv->ds->ops = &gswip_switch_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) priv->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) version = gswip_switch_r(priv, GSWIP_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) /* load the GPHY firmware if a gphy-fw node is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if (gphy_fw_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) of_node_put(gphy_fw_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) dev_err(dev, "gphy fw probe failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /* bring up the mdio bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (mdio_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) err = gswip_mdio(priv, mdio_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) dev_err(dev, "mdio probe failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) goto put_mdio_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) err = dsa_register_switch(priv->ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) dev_err(dev, "dsa switch register failed: %i\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) goto mdio_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) dev_err(dev, "wrong CPU port defined, HW only supports port: %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) priv->hw_info->cpu_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) goto disable_switch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) platform_set_drvdata(pdev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) dev_info(dev, "probed GSWIP version %lx mod %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) disable_switch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) dsa_unregister_switch(priv->ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) mdio_bus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (mdio_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) mdiobus_unregister(priv->ds->slave_mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) mdiobus_free(priv->ds->slave_mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) put_mdio_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) of_node_put(mdio_np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) for (i = 0; i < priv->num_gphy_fw; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) static int gswip_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) struct gswip_priv *priv = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* disable the switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) dsa_unregister_switch(priv->ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (priv->ds->slave_mii_bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) mdiobus_unregister(priv->ds->slave_mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) of_node_put(priv->ds->slave_mii_bus->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) mdiobus_free(priv->ds->slave_mii_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) for (i = 0; i < priv->num_gphy_fw; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) static const struct gswip_hw_info gswip_xrx200 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) .max_ports = 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) .cpu_port = 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) static const struct of_device_id gswip_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) MODULE_DEVICE_TABLE(of, gswip_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static struct platform_driver gswip_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) .probe = gswip_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) .remove = gswip_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) .name = "gswip",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) .of_match_table = gswip_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) module_platform_driver(gswip_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) MODULE_LICENSE("GPL v2");