/*
 * Broadcom specific AMBA
 * PCI Core
 *
 * Copyright 2005, 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>

/**************************************************
 * R/W ops.
 **************************************************/

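/*
 * The PCIe core's own configuration registers are not mapped directly;
 * they are reached through the PCIEIND_ADDR/PCIEIND_DATA indirect
 * register pair. The read-back of the address register makes sure the
 * address write has reached the core before the data register is used.
 */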
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}

static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}

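/*
 * Select which SerDes PHY subsequent MDIO transactions talk to (only
 * needed with the rev >= 10 addressing scheme): write the PHY address
 * to the block-address register and poll the controller until the
 * transaction is reported as done.
 */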
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
{
	u32 v;
	int i;

	v = BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
	v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= (phy << 4);
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	udelay(10);
	for (i = 0; i < 200; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		usleep_range(1000, 2000);
	}
}

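/*
 * Read a 16-bit SerDes register over MDIO. Core revs >= 10 select the
 * PHY first and use the DEVADDR/REGADDR field layout; older cores
 * encode device and register address directly in the data word.
 * Returns 0 if the transaction never completes.
 */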
static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
{
	int max_retries = 10;
	u16 ret = 0;
	u32 v;
	int i;

	/* enable MDIO access to the SerDes */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_READ;
	v |= BCMA_CORE_PCI_MDIODATA_TA;

	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
			udelay(10);
			ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
			break;
		}
		usleep_range(1000, 2000);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
	return ret;
}

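/*
 * Write a 16-bit SerDes register over MDIO. Addressing works exactly
 * as in bcma_pcie_mdio_read() above.
 */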
static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
				 u8 address, u16 data)
{
	int max_retries = 10;
	u32 v;
	int i;

	/* enable MDIO access to the SerDes */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= data;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		usleep_range(1000, 2000);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}

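/* Write a SerDes register and immediately read it back. */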
static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
				    u8 address, u16 data)
{
	bcma_pcie_mdio_write(pc, device, address, data);
	return bcma_pcie_mdio_read(pc, device, address);
}

/**************************************************
 * Early init.
 **************************************************/

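/*
 * Make sure the "PCI core index" field in the SPROM shadow area matches
 * the index of this core. On chips carrying more than one PCI(e) core
 * the field may otherwise point at the wrong core.
 */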
static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
{
	struct bcma_device *core = pc->core;
	u16 val16, core_index;
	uint regoff;

	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
	core_index = (u16)core->core_index;

	val16 = pcicore_read16(pc, regoff);
	if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
	    != core_index) {
		val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
			(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
		pcicore_write16(pc, regoff, val16);
	}
}

/*
 * Apply some early fixes required before accessing SPROM.
 * See also si_pci_fixcfg.
 */
void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
{
	if (pc->early_setup_done)
		return;

	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
	if (pc->hostmode)
		goto out;

	bcma_core_pci_fixcfg(pc);

out:
	pc->early_setup_done = true;
}

/**************************************************
 * Workarounds.
 **************************************************/

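/*
 * Work out the SerDes RX control value: always force the RX settings,
 * and additionally request polarity inversion if the PLP status
 * register reports that the link polarity is inverted.
 */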
static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
{
	u32 tmp;

	tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
	if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
		       BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
	else
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
}

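/*
 * Program the SerDes RX control according to the detected polarity and
 * turn off the PLL frequency detector if it happens to be enabled.
 */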
static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
{
	u16 tmp;

	bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
			     BCMA_CORE_PCI_SERDES_RX_CTRL,
			     bcma_pcicore_polarity_workaround(pc));
	tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				  BCMA_CORE_PCI_SERDES_PLL_CTRL);
	if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
		bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				     BCMA_CORE_PCI_SERDES_PLL_CTRL,
				     tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}

/*
 * Fix MISC config to allow coming out of L2/L3-Ready state w/o PERST.
 * Needs to happen when coming out of "standby"/"hibernate".
 */
static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
{
	u16 val16;
	uint regoff;

	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);

	val16 = pcicore_read16(pc, regoff);

	if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
		val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
		pcicore_write16(pc, regoff, val16);
	}
}

/**************************************************
 * Init.
 **************************************************/

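/* Endpoint (client) mode setup: just the SerDes and config-space fixups. */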
static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
	bcma_pcicore_serdes_workaround(pc);
	bcma_core_pci_config_fixup(pc);
}

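/*
 * Main one-shot initialization: run the early setup if it has not been
 * done yet, then branch into host- or client-mode specific init.
 */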
void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
	if (pc->setup_done)
		return;

	bcma_core_pci_early_init(pc);

	if (pc->hostmode)
		bcma_core_pci_hostmode_init(pc);
	else
		bcma_core_pci_clientmode_init(pc);
}

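/*
 * Tune the SerDes MGMT registers for power saving. Only core revisions
 * 15-22 on PCI-hosted buses are handled; the values written are
 * hardware-specific and differ between rev 15-20 and rev 21-22 cores.
 */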
void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
{
	struct bcma_drv_pci *pc;
	u16 data;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	pc = &bus->drv_pci[0];

	if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
		data = up ? 0x74 : 0x7C;
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
	} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
		data = up ? 0x75 : 0x7D;
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
	}
}
EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);

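/*
 * Extend (or restore) the ASPM L1 entry timer via the DLLP PM threshold
 * register; the trailing read makes sure the write has taken effect.
 */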
static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
	u32 w;

	w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
	if (extend)
		w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
	else
		w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
	bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
	bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
}

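/* Bus-up hook: run with the extended ASPM L1 timer while the device is in use. */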
void bcma_core_pci_up(struct bcma_drv_pci *pc)
{
	bcma_core_pci_extend_L1timer(pc, true);
}

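/* Bus-down counterpart: restore the default ASPM L1 timer. */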
void bcma_core_pci_down(struct bcma_drv_pci *pc)
{
	bcma_core_pci_extend_L1timer(pc, false);
}