// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sata_nv.c - NVIDIA nForce SATA
 *
 * Copyright 2004 NVIDIA Corp. All rights reserved.
 * Copyright 2004 Andrew Chew
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * No hardware documentation available outside of NVIDIA.
 * This driver programs the NVIDIA SATA controller in a similar
 * fashion to other PCI IDE BMDMA controllers, with a few
 * NV-specific details such as register offsets, SATA phy location,
 * hotplug info, etc.
 *
 * CK804/MCP04 controllers support an alternate programming interface
 * similar to the ADMA specification (with some modifications).
 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 * sent through the legacy interface.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,
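	/*
	 * Illustrative note (added): the per-port status/enable fields sit
	 * side by side, NV_INT_PORT_SHIFT bits apart, so a handler would
	 * extract port N's bits roughly as
	 *
	 *	(irq_stat >> (NV_INT_PORT_SHIFT * N)) & NV_INT_ALL
	 */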

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
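	/*
	 * Worked-out values (added; they follow directly from the
	 * constants above):
	 *   NV_ADMA_SGTBL_LEN        = (1024 - 128) / 16 = 56 APRDs,
	 *   NV_ADMA_SGTBL_TOTAL_LEN  = 56 + 5 = 61 SG entries per command
	 *                              (5 of them live inside the CPB),
	 *   NV_ADMA_SGTBL_SZ         = 56 * 16 = 896 bytes,
	 *   NV_ADMA_PORT_PRIV_DMA_SZ = 32 * (128 + 896) = 32768 bytes/port.
	 */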

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;	/* 0 */
	u8			reserved1;	/* 1 */
	u8			ctl_flags;	/* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3 */
	u8			tag;		/* 4 */
	u8			next_cpb_idx;	/* 5 */
	__le16			reserved2;	/* 6-7 */
	__le16			tf[12];		/* 8-31 */
	struct nv_adma_prd	aprd[5];	/* 32-111 */
	__le64			next_aprd;	/* 112-119 */
	__le64			reserved3;	/* 120-127 */
};
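/*
 * Layout check (added; follows from the byte offsets noted above): the
 * 8-byte header, 24-byte taskfile area (12 * __le16), five 16-byte
 * in-CPB APRDs (80 bytes), the 8-byte next_aprd and 8 reserved bytes
 * add up to exactly 128 bytes, i.e. NV_ADMA_CPB_SZ.
 */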


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
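/*
 * Minimal sketch (added) of the FIFO discipline this structure supports;
 * the snippet is illustrative only, not a copy of the enqueue/dequeue
 * helpers that appear later in this file:
 *
 *	dq->defer_bits |= 1 << tag;				// mark deferred
 *	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = tag;	// enqueue
 *	...
 *	tag = dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)];	// dequeue (FIFO)
 *	dq->defer_bits &= ~(1 << tag);
 */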

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	/* our SG list */
	dma_addr_t	prd_dma;	/* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* FIFO circular queue to store deferred commands */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;
};


#define NV_ADMA_CHECK_INTR(GCTL, PORT)	((GCTL) & (1 << (19 + (12 * (PORT)))))
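/*
 * Example (added; derived from the macro above): port 0's interrupt is
 * bit 19 of the ADMA general status word, port 1's is bit 31
 * (19 + 12 * 1).
 */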

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 * bko11195 reports that link doesn't come online after hardreset on
 * generic nv's and there have been several other similar reports on
 * linux-ide.
 *
 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
 * softreset.
 *
 * NF2/3:
 *
 * bko3352 reports nf2/3 controllers can't determine device signature
 * reliably after hardreset.  The following thread reports detection
 * failure on cold boot with the standard debouncing timing.
 *
 * http://thread.gmane.org/gmane.linux.ide/34098
 *
 * bko12176 reports that hardreset fails to bring up the link during
 * boot on nf2.
 *
 * CK804:
 *
 * For initial probing after boot and hot plugging, hardreset mostly
 * works fine on CK804 but curiously, reprobing on the initial port
 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 * FIS in a somewhat nondeterministic way.
 *
 * SWNCQ:
 *
 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 * hardreset should be used and hardreset can't report proper
 * signature, which suggests that mcp5x is closer to nf2 as far as
 * reset quirkiness is concerned.
 *
 * bko12703 reports that boot probing fails for intel SSD with
 * hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
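/*
 * Note (added): NV_PI_PRIV() expands to a C99 compound literal, so each
 * use in nv_port_info[] below yields a pointer to a distinct, statically
 * allocated struct nv_pi_priv, e.g.:
 *
 *	.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
 */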

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
			      status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			      status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
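/*
 * Summary (added) of the two routines above: the port is toggled between
 * register (legacy) mode and ADMA mode by clearing or setting
 * NV_ADMA_CTL_GO and then polling NV_ADMA_STAT, up to 20 times with
 * 50ns delays.  A hypothetical caller that must use the legacy
 * interface would bracket the operation roughly as:
 *
 *	nv_adma_register_mode(ap);	// drop to register mode
 *	... issue the legacy command ...
 *	nv_adma_mode(ap);		// back to ADMA mode
 */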

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/*
		 * We have to set the DMA mask to 32-bit if either port is in
		 * ATAPI mode, since they are on the same PCI device which is
		 * used for DMA mapping.  If either SCSI device is not allocated
		 * yet, it's OK since that port will discover its correct
		 * setting when it does get allocated.
		 */
		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	} else {
		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_info(ap,
		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		      (unsigned long long)*ap->host->dev->dma_mask,
		      segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
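/*
 * Check one CPB for completion or error. Returns 1 if the command is
 * done, 0 if it is still in flight, and -1 if an error was flagged, in
 * which case the port has already been aborted or frozen for EH.
 */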
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct nv_adma_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) u8 flags = pp->cpb[cpb_num].resp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (unlikely((force_err ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) flags & (NV_CPB_RESP_ATA_ERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) NV_CPB_RESP_CMD_ERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) NV_CPB_RESP_CPB_ERR)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct ata_eh_info *ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) int freeze = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (flags & NV_CPB_RESP_ATA_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ata_ehi_push_desc(ehi, "ATA error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ehi->err_mask |= AC_ERR_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) } else if (flags & NV_CPB_RESP_CMD_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ata_ehi_push_desc(ehi, "CMD error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ehi->err_mask |= AC_ERR_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) } else if (flags & NV_CPB_RESP_CPB_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ata_ehi_push_desc(ehi, "CPB error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ehi->err_mask |= AC_ERR_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) freeze = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /* notifier error, but no error in CPB flags? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ata_ehi_push_desc(ehi, "unknown");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ehi->err_mask |= AC_ERR_OTHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) freeze = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /* Kill all commands. EH will determine what actually failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (freeze)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) ata_port_abort(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (likely(flags & NV_CPB_RESP_DONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
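/*
 * Legacy (non-ADMA) per-port interrupt handling. Returns non-zero if the
 * interrupt was consumed: hotplug events freeze the port, device
 * interrupts with no active command are cleared by reading the status
 * register, and everything else goes to the standard BMDMA handler.
 */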
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /* freeze if hotplugged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* bail out if not our interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (!(irq_stat & NV_INT_DEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* DEV interrupt w/ no active qc? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) ata_sff_check_status(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* handle interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return ata_bmdma_port_intr(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
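/*
 * Main interrupt handler for ADMA-capable controllers. For each port:
 * fall back to the legacy handler if ADMA is disabled (ATAPI setup) or
 * the port is in register mode; otherwise read and clear the notifier
 * and status registers and complete whatever CPBs have finished. The
 * notifier clear registers for both ports are written together at the
 * end, as the hardware requires.
 */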
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct ata_host *host = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) int i, handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) u32 notifier_clears[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) for (i = 0; i < host->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct ata_port *ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct nv_adma_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) void __iomem *mmio = pp->ctl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) u32 gen_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) u32 notifier, notifier_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) notifier_clears[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* if ADMA is disabled, use standard ata interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) >> (NV_INT_PORT_SHIFT * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) handled += nv_host_intr(ap, irq_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* if in ATA register mode, check for standard interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) >> (NV_INT_PORT_SHIFT * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (ata_tag_valid(ap->link.active_tag))
				/*
				 * The NV_INT_DEV indication seems unreliable
				 * at times, at least in ADMA mode. Force it
				 * on whenever a command is active, to avoid
				 * losing interrupts.
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) irq_stat |= NV_INT_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) handled += nv_host_intr(ap, irq_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) notifier = readl(mmio + NV_ADMA_NOTIFIER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) notifier_clears[i] = notifier | notifier_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) !notifier_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /* Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) status = readw(mmio + NV_ADMA_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * Clear status. Ensure the controller sees the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * clearing before we start looking at any of the CPB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * statuses, so that any CPB completions after this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * point in the handler will raise another interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) writew(status, mmio + NV_ADMA_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) readw(mmio + NV_ADMA_STAT); /* flush posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) handled++; /* irq handled if we got here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* freeze if hotplugged or controller error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) NV_ADMA_STAT_HOTUNPLUG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) NV_ADMA_STAT_TIMEOUT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) NV_ADMA_STAT_SERROR))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct ata_eh_info *ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (status & NV_ADMA_STAT_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ehi->err_mask |= AC_ERR_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) ata_ehi_push_desc(ehi, "timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) } else if (status & NV_ADMA_STAT_HOTPLUG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) ata_ehi_hotplugged(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ata_ehi_push_desc(ehi, "hotplug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) ata_ehi_hotplugged(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) ata_ehi_push_desc(ehi, "hot unplug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) } else if (status & NV_ADMA_STAT_SERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* let EH analyze SError and figure out cause */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ata_ehi_push_desc(ehi, "SError");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ata_ehi_push_desc(ehi, "unknown");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (status & (NV_ADMA_STAT_DONE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) NV_ADMA_STAT_CPBERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) NV_ADMA_STAT_CMD_COMPLETE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) u32 check_commands = notifier_clears[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) u32 done_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int pos, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (status & NV_ADMA_STAT_CPBERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* check all active commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (ata_tag_valid(ap->link.active_tag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) check_commands = 1 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) ap->link.active_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) check_commands = ap->link.sactive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /* check CPBs for completed commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) while ((pos = ffs(check_commands))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) pos--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) rc = nv_adma_check_cpb(ap, pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) notifier_error & (1 << pos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (rc > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) done_mask |= 1 << pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) else if (unlikely(rc < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) check_commands = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) check_commands &= ~(1 << pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (notifier_clears[0] || notifier_clears[1]) {
		/*
		 * Note: both notifier clear registers must be written if
		 * either is set, even if one is zero, according to NVIDIA.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct nv_adma_port_priv *pp = host->ports[0]->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) writel(notifier_clears[0], pp->notifier_clear_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) pp = host->ports[1]->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) writel(notifier_clears[1], pp->notifier_clear_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void nv_adma_freeze(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct nv_adma_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) void __iomem *mmio = pp->ctl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) nv_ck804_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /* clear any outstanding CK804 notifications */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* Disable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) tmp = readw(mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) readw(mmio + NV_ADMA_CTL); /* flush posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static void nv_adma_thaw(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct nv_adma_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) void __iomem *mmio = pp->ctl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) nv_ck804_thaw(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* Enable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) tmp = readw(mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) readw(mmio + NV_ADMA_CTL); /* flush posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static void nv_adma_irq_clear(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct nv_adma_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) void __iomem *mmio = pp->ctl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) u32 notifier_clears[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) ata_bmdma_irq_clear(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* clear any outstanding CK804 notifications */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /* clear ADMA status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) writew(0xffff, mmio + NV_ADMA_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
	/*
	 * Clear the notifiers. Note that both ports' registers must be
	 * written, even though we are only clearing notifications on one.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (ap->port_no == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) notifier_clears[0] = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) notifier_clears[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) notifier_clears[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) notifier_clears[1] = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) pp = ap->host->ports[0]->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) writel(notifier_clears[0], pp->notifier_clear_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) pp = ap->host->ports[1]->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) writel(notifier_clears[1], pp->notifier_clear_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct nv_adma_port_priv *pp = qc->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ata_bmdma_post_internal_cmd(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
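/*
 * Per-port init for ADMA. The DMA mask is temporarily forced to 32 bits
 * so that the legacy PRD and pad buffers allocated by
 * ata_bmdma_port_start() land below 4GB, then raised to 64 bits for the
 * CPB/APRD block. That block holds NV_ADMA_MAX_CPBS command parameter
 * blocks followed by one NV_ADMA_SGTBL_LEN-entry APRD table per tag.
 */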
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static int nv_adma_port_start(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct device *dev = ap->host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct nv_adma_port_priv *pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) void *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) dma_addr_t mem_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) void __iomem *mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) VPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * pad buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
	/* we might fall back to bmdma, so allocate bmdma resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) rc = ata_bmdma_port_start(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (!pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) ap->port_no * NV_ADMA_PORT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) pp->ctl_block = mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) pp->notifier_clear_block = pp->gen_block +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * Now that the legacy PRD and padding buffer are allocated we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * raise the DMA mask to allocate the CPB/APRD table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) pp->adma_dma_mask = *dev->dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) &mem_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (!mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * First item in chunk of DMA memory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * 128-byte command parameter block (CPB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * one for each command tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) pp->cpb = mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) pp->cpb_dma = mem_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * Second item: block of ADMA_SGTBL_LEN s/g entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) pp->aprd = mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) pp->aprd_dma = mem_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ap->private_data = pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /* clear any outstanding interrupt conditions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) writew(0xffff, mmio + NV_ADMA_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /* initialize port variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) pp->flags = NV_ADMA_PORT_REGISTER_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* clear CPB fetch count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) writew(0, mmio + NV_ADMA_CPB_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* clear GO for register mode, enable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) tmp = readw(mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) tmp = readw(mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) readw(mmio + NV_ADMA_CTL); /* flush posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) readw(mmio + NV_ADMA_CTL); /* flush posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static void nv_adma_port_stop(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct nv_adma_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) void __iomem *mmio = pp->ctl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) VPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) writew(0, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct nv_adma_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) void __iomem *mmio = pp->ctl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /* Go to register mode - clears GO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) nv_adma_register_mode(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* clear CPB fetch count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) writew(0, mmio + NV_ADMA_CPB_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /* disable interrupt, shut down port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) writew(0, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
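/*
 * Resume mirrors the tail of nv_adma_port_start(): reprogram the CPB
 * base, clear any stale status, drop back to register mode and pulse a
 * channel reset.
 */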
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static int nv_adma_port_resume(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct nv_adma_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) void __iomem *mmio = pp->ctl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /* set CPB block location */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* clear any outstanding interrupt conditions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) writew(0xffff, mmio + NV_ADMA_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* initialize port variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* clear CPB fetch count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) writew(0, mmio + NV_ADMA_CPB_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /* clear GO for register mode, enable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) tmp = readw(mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) tmp = readw(mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) readw(mmio + NV_ADMA_CTL); /* flush posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) readw(mmio + NV_ADMA_CTL); /* flush posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
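/*
 * Point the port's taskfile I/O addresses at the shadow registers inside
 * this port's ADMA control block; each ATA register is mapped at a
 * 4-byte stride, with the control/altstatus pair at offset 0x20.
 */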
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) static void nv_adma_setup_port(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct ata_ioports *ioport = &ap->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) VPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ioport->cmd_addr = mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ioport->data_addr = mmio + (ATA_REG_DATA * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) ioport->error_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) ioport->status_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) ioport->altstatus_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) ioport->ctl_addr = mmio + 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static int nv_adma_host_init(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct pci_dev *pdev = to_pci_dev(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) u32 tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) VPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /* enable ADMA on the ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) NV_MCP_SATA_CFG_20_PORT1_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) for (i = 0; i < host->n_ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) nv_adma_setup_port(host->ports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
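/*
 * Fill a single ADMA PRD entry. NV_APRD_END marks the command's last
 * entry and NV_APRD_CONT chains to the next one; index 4 is the last of
 * the five APRDs held inline in the CPB, where continuation is
 * apparently expressed via the CPB's next_aprd pointer instead (see
 * nv_adma_fill_sg() below).
 */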
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct nv_adma_prd *aprd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) u8 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (qc->tf.flags & ATA_TFLAG_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) flags |= NV_APRD_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (idx == qc->n_elem - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) flags |= NV_APRD_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) else if (idx != 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) flags |= NV_APRD_CONT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) aprd->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) aprd->packet_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
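/*
 * Build the scatter/gather list for a command: the first five entries go
 * into the APRDs embedded in the CPB itself, any remainder into this
 * tag's slot in the external APRD table, linked via next_aprd.
 */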
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct nv_adma_port_priv *pp = qc->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct nv_adma_prd *aprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) unsigned int si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) VPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) for_each_sg(qc->sg, sg, qc->n_elem, si) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) aprd = (si < 5) ? &cpb->aprd[si] :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) nv_adma_fill_aprd(qc, sg, si, aprd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (si > 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) cpb->next_aprd = cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct nv_adma_port_priv *pp = qc->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
	/*
	 * The ADMA engine can only be used for non-ATAPI DMA commands or
	 * interrupt-driven no-data commands.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) (qc->tf.flags & ATA_TFLAG_POLLING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) (qc->tf.protocol == ATA_PROT_NODATA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
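/*
 * Prepare a command. Commands that cannot use the ADMA engine are
 * diverted to register mode and standard BMDMA prep. For ADMA commands
 * the CPB is filled in with NV_CPB_CTL_CPB_VALID held off (and
 * resp_flags temporarily parked at DONE) until everything else is
 * written, so the controller never fetches a half-built CPB.
 */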
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct nv_adma_port_priv *pp = qc->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) NV_CPB_CTL_IEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (nv_adma_use_reg_mode(qc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) (qc->flags & ATA_QCFLAG_DMAMAP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) nv_adma_register_mode(qc->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) ata_bmdma_qc_prep(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) cpb->resp_flags = NV_CPB_RESP_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) cpb->ctl_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) cpb->len = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) cpb->tag = qc->hw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) cpb->next_cpb_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /* turn on NCQ flags for NCQ commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (qc->tf.protocol == ATA_PROT_NCQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) VPRINTK("qc->flags = 0x%lx\n", qc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (qc->flags & ATA_QCFLAG_DMAMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) nv_adma_fill_sg(qc, cpb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) ctl_flags |= NV_CPB_CTL_APRD_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
	/*
	 * Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	 * until we are finished filling in all of the contents.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) cpb->ctl_flags = ctl_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) cpb->resp_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct nv_adma_port_priv *pp = qc->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) void __iomem *mmio = pp->ctl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) VPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
	/*
	 * We can't handle a result taskfile with NCQ commands, since
	 * retrieving the taskfile switches us out of ADMA mode and would
	 * abort existing commands.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) (qc->flags & ATA_QCFLAG_RESULT_TF))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return AC_ERR_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (nv_adma_use_reg_mode(qc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* use ATA register mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) (qc->flags & ATA_QCFLAG_DMAMAP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) nv_adma_register_mode(qc->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return ata_bmdma_qc_issue(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) nv_adma_mode(qc->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
	/*
	 * Write the append register: the command tag goes in the lower
	 * 8 bits and (number of CPBs to append - 1) in the upper 8 bits.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (curr_ncq != pp->last_issue_ncq) {
		/*
		 * The hardware seems to need some delay when switching
		 * between NCQ and non-NCQ commands; without it we see
		 * command timeouts.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) pp->last_issue_ncq = curr_ncq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) DPRINTK("Issued tag %u\n", qc->hw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
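/*
 * Interrupt handler for the generic flavour: no controller-level
 * interrupt status is consulted here; each port with an active,
 * non-polled command is handed to the standard BMDMA handler, and idle
 * ports simply have their status register read to clear any stray
 * interrupt condition.
 */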
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) struct ata_host *host = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) for (i = 0; i < host->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct ata_port *ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) qc = ata_qc_from_tag(ap, ap->link.active_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) handled += ata_bmdma_port_intr(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * No request pending? Clear interrupt status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * anyway, in case there's one pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) ap->ops->sff_check_status(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
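/*
 * Common nf2/ck804 interrupt dispatch: the controller packs each port's
 * interrupt bits into one status byte, NV_INT_PORT_SHIFT bits per port,
 * so shift the byte down as we walk the ports.
 */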
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int i, handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) for (i = 0; i < host->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) handled += nv_host_intr(host->ports[i], irq_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) irq_stat >>= NV_INT_PORT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) struct ata_host *host = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) u8 irq_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) irqreturn_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) ret = nv_do_interrupt(host, irq_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) struct ata_host *host = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) u8 irq_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) irqreturn_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) ret = nv_do_interrupt(host, irq_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
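/*
 * SATA control register (SStatus/SError/SControl) access: the SCRs are
 * memory-mapped contiguously at scr_addr, 4 bytes per register.
 */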
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (sc_reg > SCR_CONTROL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (sc_reg > SCR_CONTROL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) static int nv_hardreset(struct ata_link *link, unsigned int *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct ata_eh_context *ehc = &link->eh_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
	/*
	 * Do a hardreset only if this is post-boot probing; please read
	 * the comment above the port ops for details.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) !ata_dev_enabled(link->device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) const unsigned long *timing = sata_ehc_deb_timing(ehc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (!(ehc->i.flags & ATA_EHI_QUIET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) ata_link_info(link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) "nv: skipping hardreset on occupied port\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /* make sure the link is online */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) rc = sata_link_resume(link, timing, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /* whine about phy resume failure but proceed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (rc && rc != -EOPNOTSUPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) ata_link_warn(link, "failed to resume link (errno=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /* device signature acquisition is unreliable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
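/* On nForce2/3 the interrupt status/enable registers are reached
 * through port 0's SCR block; each port owns an NV_INT_PORT_SHIFT-wide
 * field within them.  Freezing masks the port's enable bits; thawing
 * acks any pending status and unmasks them.  The CK804 and MCP55
 * variants below follow the same pattern via the NV_MMIO_BAR block.
 */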
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static void nv_nf2_freeze(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) int shift = ap->port_no * NV_INT_PORT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) u8 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) mask = ioread8(scr_addr + NV_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) mask &= ~(NV_INT_ALL << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) iowrite8(mask, scr_addr + NV_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) static void nv_nf2_thaw(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) int shift = ap->port_no * NV_INT_PORT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) u8 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) mask = ioread8(scr_addr + NV_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) mask |= (NV_INT_MASK << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) iowrite8(mask, scr_addr + NV_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static void nv_ck804_freeze(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) int shift = ap->port_no * NV_INT_PORT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) u8 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) mask = readb(mmio_base + NV_INT_ENABLE_CK804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) mask &= ~(NV_INT_ALL << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) static void nv_ck804_thaw(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int shift = ap->port_no * NV_INT_PORT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) u8 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) mask = readb(mmio_base + NV_INT_ENABLE_CK804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) mask |= (NV_INT_MASK << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static void nv_mcp55_freeze(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) mask &= ~(NV_INT_ALL_MCP55 << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static void nv_mcp55_thaw(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) mask |= (NV_INT_MASK_MCP55 << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
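/* If EH is entered while the port is still in ADMA mode, dump the CPB
 * state of any active commands, drop back to register mode, invalidate
 * all CPBs and reset the ADMA channel before handing control to the
 * generic BMDMA error handler.
 */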
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static void nv_adma_error_handler(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) struct nv_adma_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) void __iomem *mmio = pp->ctl_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) u32 status = readw(mmio + NV_ADMA_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) ata_port_err(ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) "EH in ADMA mode, notifier 0x%X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) "next cpb count 0x%X next cpb idx 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) notifier, notifier_error, gen_ctl, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) cpb_count, next_cpb_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) struct nv_adma_cpb *cpb = &pp->cpb[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) ap->link.sactive & (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) ata_port_err(ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) i, cpb->ctl_flags, cpb->resp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) /* Push us back into port register mode for error handling. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) nv_adma_register_mode(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
/* Mark all of the CPBs as invalid to prevent them from
 * being executed.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /* clear CPB fetch count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) writew(0, mmio + NV_ADMA_CPB_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) /* Reset channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) tmp = readw(mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) readw(mmio + NV_ADMA_CTL); /* flush posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) readw(mmio + NV_ADMA_CTL); /* flush posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) ata_bmdma_error_handler(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
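/* The defer queue is a small ring of tags: nv_swncq_qc_to_dq() pushes
 * a command's tag at the tail, nv_swncq_qc_from_dq() pops from the
 * head, and defer_bits mirrors which tags are queued so membership can
 * be checked with a single bit test.
 */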
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) struct defer_queue *dq = &pp->defer_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) /* queue is full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) dq->defer_bits |= (1 << qc->hw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) struct defer_queue *dq = &pp->defer_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) unsigned int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (dq->head == dq->tail) /* null queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) WARN_ON(!(dq->defer_bits & (1 << tag)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) dq->defer_bits &= ~(1 << tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return ata_qc_from_tag(ap, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) static void nv_swncq_fis_reinit(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) pp->dhfis_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) pp->dmafis_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) pp->sdbfis_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) pp->ncq_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) static void nv_swncq_pp_reinit(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) struct defer_queue *dq = &pp->defer_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) dq->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) dq->tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) dq->defer_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) pp->qc_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) pp->last_issue_tag = ATA_TAG_POISON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) nv_swncq_fis_reinit(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) writew(fis, pp->irq_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
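/* ata_bmdma_stop() only looks at qc->ap, so a throwaway command on the
 * stack is sufficient to stop the BMDMA engine when no real qc is at
 * hand.
 */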
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) static void __ata_bmdma_stop(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct ata_queued_cmd qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) qc.ap = ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) ata_bmdma_stop(&qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) static void nv_swncq_ncq_stop(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) u32 sactive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) u32 done_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) ap->qc_active, ap->link.sactive);
ata_port_err(ap,
"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n"
"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) ap->ops->sff_check_status(ap),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) ioread8(ap->ioaddr.error_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) sactive = readl(pp->sactive_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) done_mask = pp->qc_active ^ sactive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) for (i = 0; i < ATA_MAX_QUEUE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) u8 err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (pp->qc_active & (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) else if (done_mask & (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) err = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) ata_port_err(ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) "tag 0x%x: %01x %01x %01x %01x %s\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) (pp->dhfis_bits >> i) & 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) (pp->dmafis_bits >> i) & 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) (pp->sdbfis_bits >> i) & 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) (sactive >> i) & 0x1,
(err ? "error! tag doesn't exist" : " "));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) nv_swncq_pp_reinit(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) ap->ops->sff_irq_clear(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) __ata_bmdma_stop(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) nv_swncq_irq_clear(ap, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) static void nv_swncq_error_handler(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) struct ata_eh_context *ehc = &ap->link.eh_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (ap->link.sactive) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) nv_swncq_ncq_stop(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) ehc->i.action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) ata_bmdma_error_handler(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) /* clear irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) writel(~0, mmio + NV_INT_STATUS_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) /* disable irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) writel(0, mmio + NV_INT_ENABLE_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) /* disable swncq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) tmp = readl(mmio + NV_CTL_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) writel(tmp, mmio + NV_CTL_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) static int nv_swncq_port_resume(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /* clear irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) writel(~0, mmio + NV_INT_STATUS_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) /* enable irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /* enable swncq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) tmp = readl(mmio + NV_CTL_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) static void nv_swncq_host_init(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) void __iomem *mmio = host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) struct pci_dev *pdev = to_pci_dev(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) u8 regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) /* disable ECO 398 */
pci_read_config_byte(pdev, 0x7f, &regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) regval &= ~(1 << 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) pci_write_config_byte(pdev, 0x7f, regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) /* enable swncq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) tmp = readl(mmio + NV_CTL_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) VPRINTK("HOST_CTL:0x%X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) /* enable irq intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) tmp = readl(mmio + NV_INT_ENABLE_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) VPRINTK("HOST_ENABLE:0x%X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) /* clear port irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) writel(~0x0, mmio + NV_INT_STATUS_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) static int nv_swncq_slave_config(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct ata_port *ap = ata_shost_to_port(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) struct ata_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) u8 rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) u8 check_maxtor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) unsigned char model_num[ATA_ID_PROD_LEN + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) rc = ata_scsi_slave_config(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) /* Not a proper libata device, ignore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) dev = &ap->link.device[sdev->id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) /* if MCP51 and Maxtor, then disable ncq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) check_maxtor = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) pci_read_config_byte(pdev, 0x8, &rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (rev <= 0xa2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) check_maxtor = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (!check_maxtor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (strncmp(model_num, "Maxtor", 6) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) ata_scsi_change_queue_depth(sdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) sdev->queue_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
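/* Unlike plain BMDMA, SWNCQ keeps one PRD table per NCQ tag so that
 * every queued command can have its scatter/gather table prepared up
 * front; the per-port SActive, interrupt and tag register blocks are
 * also resolved once here.
 */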
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) static int nv_swncq_port_start(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct device *dev = ap->host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) struct nv_swncq_port_priv *pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
/* we might fall back to bmdma, allocate bmdma resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) rc = ata_bmdma_port_start(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (!pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) &pp->prd_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (!pp->prd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) ap->private_data = pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (qc->tf.protocol != ATA_PROT_NCQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) ata_bmdma_qc_prep(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (!(qc->flags & ATA_QCFLAG_DMAMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) nv_swncq_fill_sg(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
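/* Build the PRD table for this command's tag.  As with standard BMDMA,
 * a PRD entry must not cross a 64K boundary, so scatterlist segments
 * are split wherever offset + length would exceed 0x10000.
 */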
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) struct ata_bmdma_prd *prd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) unsigned int si, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) for_each_sg(qc->sg, sg, qc->n_elem, si) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) u32 addr, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) u32 sg_len, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) addr = (u32)sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) sg_len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) while (sg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) offset = addr & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) len = sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if ((offset + sg_len) > 0x10000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) len = 0x10000 - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) prd[idx].addr = cpu_to_le32(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) prd[idx].flags_len = cpu_to_le32(len & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) sg_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) addr += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
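/* Issue a single NCQ command: set its bit in SActive, record it as the
 * last issued tag, update the FIS-tracking bitmaps and send the
 * taskfile through the regular SFF path.
 */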
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (qc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) DPRINTK("Enter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) writel((1 << qc->hw_tag), pp->sactive_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) pp->last_issue_tag = qc->hw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) pp->dhfis_bits &= ~(1 << qc->hw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) pp->dmafis_bits &= ~(1 << qc->hw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) pp->qc_active |= (0x1 << qc->hw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) ap->ops->sff_exec_command(ap, &qc->tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) DPRINTK("Issued tag %u\n", qc->hw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (qc->tf.protocol != ATA_PROT_NCQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) return ata_bmdma_qc_issue(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) DPRINTK("Enter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (!pp->qc_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) nv_swncq_issue_atacmd(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) u32 serror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) struct ata_eh_info *ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) /* AHCI needs SError cleared; otherwise, it might lock up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) sata_scr_read(&ap->link, SCR_ERROR, &serror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) sata_scr_write(&ap->link, SCR_ERROR, serror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
/* analyze @fis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (fis & NV_SWNCQ_IRQ_ADDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ata_ehi_push_desc(ehi, "hot plug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) else if (fis & NV_SWNCQ_IRQ_REMOVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) ata_ehi_push_desc(ehi, "hot unplug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) ata_ehi_hotplugged(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) /* okay, let's hand over to EH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) ehi->serror |= serror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
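/* Handle a Set Device Bits FIS: complete every command whose SActive
 * bit the device has cleared.  If a command never received its D2H
 * register FIS (or a backout was seen), the last issued command is
 * reissued; otherwise the next deferred command, if any, is started.
 */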
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) static int nv_swncq_sdbfis(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) struct ata_eh_info *ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) u32 sactive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) u32 done_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) u8 host_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) u8 lack_dhfis = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) host_stat = ap->ops->bmdma_status(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (unlikely(host_stat & ATA_DMA_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) /* error when transferring data to/from memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) ehi->err_mask |= AC_ERR_HOST_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) ehi->action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) ap->ops->sff_irq_clear(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) __ata_bmdma_stop(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) sactive = readl(pp->sactive_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) done_mask = pp->qc_active ^ sactive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) pp->qc_active &= ~done_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) pp->dhfis_bits &= ~done_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) pp->dmafis_bits &= ~done_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) pp->sdbfis_bits |= done_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (!ap->qc_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) DPRINTK("over\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) nv_swncq_pp_reinit(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (pp->qc_active & pp->dhfis_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) if ((pp->ncq_flags & ncq_saw_backout) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) (pp->qc_active ^ pp->dhfis_bits))
/* If the controller can't get a device-to-host register FIS,
 * the driver needs to reissue the new command.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) lack_dhfis = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
DPRINTK("id 0x%x QC: qc_active 0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) "SWNCQ:qc_active 0x%X defer_bits %X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) ap->print_id, ap->qc_active, pp->qc_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) pp->defer_queue.defer_bits, pp->dhfis_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) pp->dmafis_bits, pp->last_issue_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) nv_swncq_fis_reinit(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (lack_dhfis) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) qc = ata_qc_from_tag(ap, pp->last_issue_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) nv_swncq_issue_atacmd(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (pp->defer_queue.defer_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /* send deferral queue command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) qc = nv_swncq_qc_from_dq(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) WARN_ON(qc == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) nv_swncq_issue_atacmd(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
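/* Bits 6:2 of the tag register report which tag the controller is
 * currently processing.
 */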
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) static inline u32 nv_swncq_tag(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) u32 tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) tag = readb(pp->tag_block) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) return (tag & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
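/* On a DMA Setup FIS, point the BMDMA engine at the PRD table of the
 * tag the controller selected and start the transfer.  ATA_DMA_WR
 * makes the engine write to memory, so it is set for device reads.
 */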
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) static void nv_swncq_dmafis(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) unsigned int rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) u8 dmactl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) u32 tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) __ata_bmdma_stop(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) tag = nv_swncq_tag(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) DPRINTK("dma setup tag 0x%x\n", tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) qc = ata_qc_from_tag(ap, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (unlikely(!qc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) rw = qc->tf.flags & ATA_TFLAG_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) /* load PRD table addr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) /* specify data direction, triple-check start bit is clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) dmactl &= ~ATA_DMA_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (!rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) dmactl |= ATA_DMA_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
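/* Per-port SWNCQ interrupt handling: ack the reported FIS bits first,
 * then dispatch hotplug, device error, backout, SDB and D2H/DMA-setup
 * events in turn, issuing deferred commands once the bus is idle.
 */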
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) struct nv_swncq_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) struct ata_eh_info *ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) u32 serror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) u8 ata_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) ata_stat = ap->ops->sff_check_status(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) nv_swncq_irq_clear(ap, fis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) if (!fis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (ap->pflags & ATA_PFLAG_FROZEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) nv_swncq_hotplug(ap, fis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) if (!pp->qc_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (ata_stat & ATA_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) ehi->err_mask |= AC_ERR_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) ehi->serror |= serror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) ehi->action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (fis & NV_SWNCQ_IRQ_BACKOUT) {
/* If the IRQ reports a backout, the driver must issue
 * the new command again some time later.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) pp->ncq_flags |= ncq_saw_backout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (fis & NV_SWNCQ_IRQ_SDBFIS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) pp->ncq_flags |= ncq_saw_sdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) ap->print_id, pp->qc_active, pp->dhfis_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) pp->dmafis_bits, readl(pp->sactive_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (nv_swncq_sdbfis(ap) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) goto irq_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /* The interrupt indicates the new command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * was transmitted correctly to the drive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) pp->ncq_flags |= ncq_saw_d2h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) ata_ehi_push_desc(ehi, "illegal fis transaction");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) ehi->err_mask |= AC_ERR_HSM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) ehi->action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) goto irq_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) !(pp->ncq_flags & ncq_saw_dmas)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) ata_stat = ap->ops->sff_check_status(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) if (ata_stat & ATA_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) goto irq_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (pp->defer_queue.defer_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) DPRINTK("send next command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) qc = nv_swncq_qc_from_dq(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) nv_swncq_issue_atacmd(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if (fis & NV_SWNCQ_IRQ_DMASETUP) {
/* program the dma controller with appropriate PRD buffers
 * and start the DMA transfer for the requested command.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) pp->ncq_flags |= ncq_saw_dmas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) nv_swncq_dmafis(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) irq_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) irq_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) ata_ehi_push_desc(ehi, "fis:0x%x", fis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) struct ata_host *host = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) u32 irq_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) for (i = 0; i < host->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) struct ata_port *ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (ap->link.sactive) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) nv_swncq_host_interrupt(ap, (u16)irq_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) } else {
if (irq_stat) /* preserve the hotplug bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) nv_swncq_irq_clear(ap, 0xfff0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) handled += nv_host_intr(ap, (u8)irq_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
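/* Probe: choose the programming interface (ADMA on CK804, SWNCQ on
 * MCP5x, when the respective module parameter allows it), map
 * NV_MMIO_BAR for SCR and interrupt access, and activate the host
 * through the SFF helpers.
 */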
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) const struct ata_port_info *ppi[] = { NULL, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) struct nv_pi_priv *ipriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) struct ata_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) struct nv_host_priv *hpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) u32 bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) unsigned long type = ent->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
/* Make sure this is a SATA controller by counting the number of bars
 * (NVIDIA SATA controllers will always have six bars). Otherwise,
 * it's an IDE controller and we ignore it.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) if (pci_resource_start(pdev, bar) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) ata_print_version_once(&pdev->dev, DRV_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) rc = pcim_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) /* determine type and allocate host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) if (type == CK804 && adma_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) dev_notice(&pdev->dev, "Using ADMA mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) type = ADMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) } else if (type == MCP5x && swncq_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) dev_notice(&pdev->dev, "Using SWNCQ mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) type = SWNCQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) ppi[0] = &nv_port_info[type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) ipriv = ppi[0]->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) if (!hpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) hpriv->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) host->private_data = hpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) /* request and iomap NV_MMIO_BAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) /* configure SCR access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) base = host->iomap[NV_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	/* enable SATA space for CK804 and newer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (type >= CK804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) u8 regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /* init ADMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (type == ADMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) rc = nv_adma_host_init(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) } else if (type == SWNCQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) nv_swncq_host_init(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) if (msi_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) dev_notice(&pdev->dev, "Using MSI\n");
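		/*
		 * pci_enable_msi() can fail; the return value is not checked
		 * here, and on failure the controller simply keeps using
		 * legacy INTx interrupts.
		 */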
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) pci_enable_msi(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
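/*
 * For reference: 'type' in nv_init_one() comes from ent->driver_data, which
 * the PCI ID table registered via nv_pci_driver (defined earlier, not shown
 * here) sets to the controller generation. A representative entry would look
 * like this sketch; the actual device list may differ:
 *
 *	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
 */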
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) static int nv_pci_device_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) struct ata_host *host = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) struct nv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) rc = ata_pci_device_do_resume(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (hpriv->type >= CK804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) u8 regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) if (hpriv->type == ADMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) u32 tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) struct nv_adma_port_priv *pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) /* enable/disable ADMA on the ports appropriately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) pp = host->ports[0]->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) pp = host->ports[1]->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) ata_host_resume(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) #endif
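/*
 * The per-port PORTx_EN/PORTx_PWB_EN updates in nv_pci_device_resume() are
 * written out longhand for each port. A small helper could fold the two
 * cases together; this is a sketch only, nv_adma_cfg20_port_bits() is
 * hypothetical and not part of this driver:
 *
 *	static u32 nv_adma_cfg20_port_bits(unsigned int port)
 *	{
 *		return port ? NV_MCP_SATA_CFG_20_PORT1_EN |
 *			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN
 *			    : NV_MCP_SATA_CFG_20_PORT0_EN |
 *			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
 *	}
 */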
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) static void nv_ck804_host_stop(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) struct pci_dev *pdev = to_pci_dev(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) u8 regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) /* disable SATA space for CK804 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) static void nv_adma_host_stop(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) struct pci_dev *pdev = to_pci_dev(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) u32 tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) /* disable ADMA on the ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) NV_MCP_SATA_CFG_20_PORT1_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) nv_ck804_host_stop(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
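/*
 * Teardown mirrors bring-up: ADMA hosts first clear the per-port
 * PORTx_EN/PORTx_PWB_EN bits, then nv_ck804_host_stop() clears
 * NV_MCP_SATA_CFG_20_SATA_SPACE_EN, hiding the SATA register space that
 * nv_init_one() enabled.
 */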
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) module_pci_driver(nv_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) module_param_named(adma, adma_enabled, bool, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) module_param_named(swncq, swncq_enabled, bool, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) module_param_named(msi, msi_enabled, bool, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
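/*
 * Example usage (module name assumed to match DRV_NAME):
 *
 *	modprobe sata_nv adma=1 msi=1
 *
 * All three parameters use mode 0444, so they are visible under
 * /sys/module/sata_nv/parameters/ but cannot be changed at runtime.
 */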