^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * drivers/ata/sata_dwc_460ex.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Synopsys DesignWare Cores (DWC) SATA host driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Author: Mark Miesfeld <mmiesfeld@amcc.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Copyright 2008 DENX Software Engineering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Based on versions provided by AMCC and Synopsys which are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Copyright 2006 Applied Micro Circuits Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #ifdef CONFIG_SATA_DWC_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #define DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #ifdef CONFIG_SATA_DWC_VDEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define VERBOSE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define DEBUG_NCQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/phy/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/libata.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "libata.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /* These two are defined in "libata.h" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #undef DRV_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #undef DRV_VERSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define DRV_NAME "sata-dwc"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define DRV_VERSION "1.3"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define sata_dwc_writel(a, v) writel_relaxed(v, a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define sata_dwc_readl(a) readl_relaxed(a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #ifndef NO_IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define NO_IRQ 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) SATA_DWC_MAX_PORTS = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) SATA_DWC_SCR_OFFSET = 0x24,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) SATA_DWC_REG_OFFSET = 0x64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) /* DWC SATA Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) struct sata_dwc_regs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) u32 fptagr; /* 1st party DMA tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) u32 fpbor; /* 1st party DMA buffer offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) u32 fptcr; /* 1st party DMA Xfr count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) u32 dmacr; /* DMA Control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) u32 dbtsr; /* DMA Burst Transac size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) u32 intpr; /* Interrupt Pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) u32 intmr; /* Interrupt Mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) u32 errmr; /* Error Mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) u32 llcr; /* Link Layer Control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) u32 phycr; /* PHY Control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) u32 physr; /* PHY Status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) u32 rxbistpd; /* Recvd BIST pattern def register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) u32 rxbistpd1; /* Recvd BIST data dword1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) u32 txbistpd; /* Trans BIST pattern def register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) u32 txbistpd1; /* Trans BIST data dword1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) u32 txbistpd2; /* Trans BIST data dword2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) u32 bistcr; /* BIST Control Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) u32 bistfctr; /* BIST FIS Count Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) u32 bistsr; /* BIST Status Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) u32 bistdecr; /* BIST Dword Error count register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) u32 res[15]; /* Reserved locations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) u32 testr; /* Test Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) u32 versionr; /* Version Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) u32 idr; /* ID Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) u32 unimpl[192]; /* Unimplemented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) u32 dmadr[256]; /* FIFO Locations in DMA Mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) SCR_SCONTROL_DET_ENABLE = 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) SCR_SSTATUS_DET_PRESENT = 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) SCR_SERROR_DIAG_X = 0x04000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /* DWC SATA Register Operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) SATA_DWC_TXFIFO_DEPTH = 0x01FF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) SATA_DWC_RXFIFO_DEPTH = 0x01FF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) SATA_DWC_INTPR_DMAT = 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) SATA_DWC_INTPR_NEWFP = 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) SATA_DWC_INTPR_PMABRT = 0x00000004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) SATA_DWC_INTPR_ERR = 0x00000008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) SATA_DWC_INTPR_NEWBIST = 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) SATA_DWC_INTPR_IPF = 0x10000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) SATA_DWC_INTMR_DMATM = 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) SATA_DWC_INTMR_NEWFPM = 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) SATA_DWC_INTMR_PMABRTM = 0x00000004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) SATA_DWC_INTMR_ERRM = 0x00000008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) SATA_DWC_INTMR_NEWBISTM = 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) SATA_DWC_LLCR_SCRAMEN = 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) SATA_DWC_LLCR_DESCRAMEN = 0x00000002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) SATA_DWC_LLCR_RPDEN = 0x00000004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* This is all error bits, zero's are reserved fields. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) SATA_DWC_DMACR_TMOD_TXCHEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) SATA_DWC_DMACR_TMOD_TXCHEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct sata_dwc_device {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) struct device *dev; /* generic device struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) struct ata_probe_ent *pe; /* ptr to probe-ent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) struct ata_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct sata_dwc_regs __iomem *sata_dwc_regs; /* DW SATA specific */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) u32 sactive_issued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) u32 sactive_queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) struct phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) phys_addr_t dmadr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #ifdef CONFIG_SATA_DWC_OLD_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) struct dw_dma_chip *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #define SATA_DWC_QCMD_MAX 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) struct sata_dwc_device_port {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) struct sata_dwc_device *hsdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) int cmd_issued[SATA_DWC_QCMD_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) int dma_pending[SATA_DWC_QCMD_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) /* DMA info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) u32 dma_interrupt_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) * Commonly used DWC SATA driver macros
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) #define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)(host)->private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) #define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)(ap)->host->private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)(ap)->private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) #define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)(qc)->ap->host->private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)(p)->hsdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) SATA_DWC_CMD_ISSUED_NOT = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) SATA_DWC_CMD_ISSUED_PEND = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) SATA_DWC_CMD_ISSUED_EXEC = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) SATA_DWC_CMD_ISSUED_NODATA = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) SATA_DWC_DMA_PENDING_NONE = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) SATA_DWC_DMA_PENDING_TX = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) SATA_DWC_DMA_PENDING_RX = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) * Prototypes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) u32 check_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static void sata_dwc_port_stop(struct ata_port *ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) #ifdef CONFIG_SATA_DWC_OLD_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #include <linux/platform_data/dma-dw.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #include <linux/dma/dw.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) static struct dw_dma_slave sata_dwc_dma_dws = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) .src_id = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) .dst_id = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) .m_master = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) .p_master = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) struct dw_dma_slave *dws = &sata_dwc_dma_dws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) if (dws->dma_dev != chan->device->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) chan->private = dws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) struct sata_dwc_device *hsdev = hsdevp->hsdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct dw_dma_slave *dws = &sata_dwc_dma_dws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) dws->dma_dev = hsdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) /* Acquire DMA channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) if (!hsdevp->chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) dev_err(hsdev->dev, "%s: dma channel unavailable\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) static int sata_dwc_dma_init_old(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) struct sata_dwc_device *hsdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) if (!hsdev->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) hsdev->dma->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) hsdev->dma->id = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) /* Get SATA DMA interrupt number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) hsdev->dma->irq = irq_of_parse_and_map(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) if (hsdev->dma->irq == NO_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) dev_err(&pdev->dev, "no SATA DMA irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) /* Get physical SATA DMA register base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) if (IS_ERR(hsdev->dma->regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return PTR_ERR(hsdev->dma->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) /* Initialize AHB DMAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) return dw_dma_probe(hsdev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (!hsdev->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) dw_dma_remove(hsdev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static const char *get_prot_descript(u8 protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) switch (protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) case ATA_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) return "ATA no data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) case ATA_PROT_PIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) return "ATA PIO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) case ATA_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) return "ATA DMA";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) case ATA_PROT_NCQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) return "ATA NCQ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) case ATA_PROT_NCQ_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) return "ATA NCQ no data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) case ATAPI_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) return "ATAPI no data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) case ATAPI_PROT_PIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) return "ATAPI PIO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) case ATAPI_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) return "ATAPI DMA";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) static const char *get_dma_dir_descript(int dma_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) switch ((enum dma_data_direction)dma_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) case DMA_BIDIRECTIONAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) return "bidirectional";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) case DMA_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) return "to device";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) case DMA_FROM_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return "from device";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return "none";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) dev_vdbg(ap->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) tf->command, get_prot_descript(tf->protocol), tf->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) tf->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) dev_vdbg(ap->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) dev_vdbg(ap->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) tf->hob_lbah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) static void dma_dwc_xfer_done(void *hsdev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) struct sata_dwc_device *hsdev = hsdev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) struct ata_host *host = (struct ata_host *)hsdev->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) struct ata_port *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) struct sata_dwc_device_port *hsdevp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) u8 tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) unsigned int port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) ap = host->ports[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) hsdevp = HSDEVP_FROM_AP(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) tag = ap->link.active_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * Each DMA command produces 2 interrupts. Only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) * complete the command after both interrupts have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * seen. (See sata_dwc_isr())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) hsdevp->dma_interrupt_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) sata_dwc_clear_dmacr(hsdevp, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) tag, hsdevp->dma_pending[tag]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if ((hsdevp->dma_interrupt_count % 2) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) sata_dwc_dma_xfer_complete(ap, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct dma_slave_config sconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) if (qc->dma_dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) sconf.src_addr = hsdev->dmadr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) sconf.device_fc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) } else { /* DMA_MEM_TO_DEV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) sconf.dst_addr = hsdev->dmadr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) sconf.device_fc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) sconf.direction = qc->dma_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) dmaengine_slave_config(hsdevp->chan, &sconf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) /* Convert SG list to linked list of items (LLIs) for AHB DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) qc->dma_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) desc->callback = dma_dwc_xfer_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) desc->callback_param = hsdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) qc->sg, qc->n_elem, &hsdev->dmadr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (scr > SCR_NOTIFICATION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) __func__, scr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) link->ap->print_id, scr, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) link->ap->print_id, scr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (scr > SCR_NOTIFICATION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) __func__, scr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) static void clear_serror(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
/*
 * qcmd_tag_to_mask - convert a queued-command tag to a one-hot bitmask.
 * @tag: command tag; masked to its low 5 bits, so the result is bit 0..31.
 *
 * Returns a u32 with exactly one bit set.
 *
 * Use an unsigned literal for the shift: left-shifting a signed 1 into the
 * sign bit (tag 31) is undefined behavior in C, while 1U << 31 is well
 * defined.
 */
static u32 qcmd_tag_to_mask(u8 tag)
{
	return 0x00000001U << (tag & 0x1f);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
/*
 * sata_dwc_error_intr - handle an error interrupt (modelled on ahci.c).
 * @ap: port the error was raised on
 * @hsdev: controller instance
 * @intpr: snapshot of the interrupt pending register
 *
 * Logs the error state, clears SError and the error interrupt bit, records
 * the error in the EH context and hands off to libata error handling via
 * ata_port_abort().  Called from sata_dwc_isr() with the host lock held.
 *
 * NOTE(review): @tag comes from ap->link.active_tag and is used below as an
 * index into hsdevp->dma_pending[]/cmd_issued[] without a range check; if
 * active_tag is ATA_TAG_POISON this indexes out of bounds -- confirm the
 * caller guarantees a valid tag on the error path.
 */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	/* Snapshot SError and the taskfile status for the log line below. */
	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now. TODO check for exact error */

	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	/* Attribute the error to the active qc if there is one. */
	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
/*
 * sata_dwc_isr - top-level interrupt handler for the controller
 * @irq: IRQ number
 * @dev_instance: pointer to the struct ata_host for this controller
 *
 * Returns an irqreturn_t indicating whether the interrupt was handled.
 * Registered via port ops as .irq_handler = sata_dwc_isr.
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, num_processed, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;
	/*
	 * NOTE(review): sactive_issued is reset before the host lock is
	 * taken -- confirm no other context can touch it concurrently.
	 */
	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

	/* This controller drives a single port (port 0). */
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		/* FPTAGR holds the tag of the command the device selected. */
		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		/*
		 * NOTE(review): qc is dereferenced below without a NULL
		 * check; a spurious NEWFP with no queued command for this
		 * tag would oops here -- confirm whether that can happen.
		 */
		qc = ata_qc_from_tag(ap, tag);
		/*
		 * Start FP DMA for NCQ command. At this point the tag is the
		 * active tag. It is the tag that matches the command about to
		 * be completed.
		 */
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}
	/* tag_mask = tags we issued that are no longer active on the wire. */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	/* If no sactive issued and tag_mask is zero then this is not NCQ */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			/* Read status anyway so the device deasserts INTRQ. */
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Device-reported error: complete the qc and let EH run. */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc, 1);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts. The DMAC
			 * transfer complete interrupt and the SATA controller
			 * operation done interrupt. The command should be
			 * completed only after both interrupts are seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
				SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			/* Even count => both halves of the pair were seen. */
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap, 1);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			/* Non-data command; retry once if still busy. */
			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is a NCQ command. At this point we need to figure out for which
	 * tags we have gotten a completion interrupt. One interrupt may serve
	 * as completion for more than one operation when commands are queued
	 * (NCQ). We need to process each completed command.
	 */

	/* process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	/* A completed tag we never issued indicates driver/HW disagreement. */
	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

	/* Walk tag_mask from LSB up, completing each finished tag. */
	tag = 0;
	num_processed = 0;
	while (tag_mask) {
		num_processed++;
		/* Skip to the next set bit; tag tracks the bit position. */
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask <<= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc, 1);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			/* Same two-interrupts-per-DMA pairing as above. */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
				SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					__func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap, 1);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		/* Completion reported busy: count it and keep scanning. */
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			 ap->print_id);
	} /* while tag_mask */

	/*
	 * Check to see if any commands completed while we were processing our
	 * initial set of completed commands (read status clears interrupts,
	 * so we might miss a completed command interrupt if one came in while
	 * we were processing --we read status as part of processing a completed
	 * command).
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * This should not happen, it indicates the driver is out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * sync. If it does happen, clear dmacr anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) dev_err(hsdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) __func__, tag, hsdevp->dma_pending[tag], dmacr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) SATA_DWC_DMACR_TXRXCH_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
/*
 * sata_dwc_dma_xfer_complete - finish the DMA half of a command completion.
 * @ap: port owning the transfer
 * @check_status: forwarded to sata_dwc_qc_complete()
 *
 * Looks up the qc for the port's active tag; for DMA-protocol commands it
 * clears the per-tag pending state and poisons active_tag after completing
 * the qc, otherwise it just completes the qc.
 *
 * Called from the interrupt handler with the host lock held.
 */
static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc");
		return;
	}

#ifdef DEBUG_NCQ
	/* Extra NCQ tracing, compiled in only with CONFIG_SATA_DWC_VDEBUG. */
	if (tag > 0) {
		dev_info(ap->dev,
			"%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n",
			__func__, qc->hw_tag, qc->tf.command,
			get_dma_dir_descript(qc->dma_dir),
			get_prot_descript(qc->tf.protocol),
			sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
	}
#endif

	if (ata_is_dma(qc->tf.protocol)) {
		/* Completing with no pending direction means we lost sync. */
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc, check_status);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc, check_status);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
/*
 * sata_dwc_qc_complete - complete a queued command and release its tag.
 * @ap: port the command ran on
 * @qc: the queued command to complete
 * @check_status: currently only logged, never acted on
 *
 * Clears the command's bit from the driver's queued/issued tag bookkeeping
 * and hands the qc to libata via ata_qc_complete(). Always returns 0.
 *
 * NOTE(review): @status is never assigned after its zero init, so the
 * "QC complete" debug line always logs status=0x00; confirm whether a real
 * taskfile status read was intended here.
 */
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
				u32 check_status)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->hw_tag;
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	hsdev->sactive_queued = 0;
	dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);

	/* A still-pending DMA here means completion raced the DMA engine. */
	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	hsdev->sactive_queued = hsdev->sactive_queued & mask;
	hsdev->sactive_issued = hsdev->sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /* Enable selective interrupts by setting the interrupt maskregister*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) SATA_DWC_INTMR_ERRM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) SATA_DWC_INTMR_NEWFPM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) SATA_DWC_INTMR_PMABRTM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) SATA_DWC_INTMR_DMATM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * Unmask the error bits that should trigger an error interrupt by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * setting the error mask register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) port->cmd_addr = base + 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) port->data_addr = base + 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) port->error_addr = base + 0x04;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) port->feature_addr = base + 0x04;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) port->nsect_addr = base + 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) port->lbal_addr = base + 0x0c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) port->lbam_addr = base + 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) port->lbah_addr = base + 0x14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) port->device_addr = base + 0x18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) port->command_addr = base + 0x1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) port->status_addr = base + 0x1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) port->altstatus_addr = base + 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) port->ctl_addr = base + 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct sata_dwc_device *hsdev = hsdevp->hsdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct device *dev = hsdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) #ifdef CONFIG_SATA_DWC_OLD_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!of_find_property(dev->of_node, "dmas", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return sata_dwc_dma_get_channel_old(hsdevp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) hsdevp->chan = dma_request_chan(dev, "sata-dma");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (IS_ERR(hsdevp->chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) dev_err(dev, "failed to allocate dma channel: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) PTR_ERR(hsdevp->chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return PTR_ERR(hsdevp->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : returns 0 if success, error code otherwise
 * This function allocates the per-port state and the scatter gather
 * LLI table for AHB DMA
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static int sata_dwc_port_start(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct sata_dwc_device *hsdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct sata_dwc_device_port *hsdevp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) hsdev = HSDEV_FROM_AP(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) hsdev->host = ap->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) pdev = ap->host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (!pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) goto CLEANUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* Allocate Port Struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!hsdevp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) goto CLEANUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) hsdevp->hsdev = hsdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) err = sata_dwc_dma_get_channel(hsdevp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) goto CLEANUP_ALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) err = phy_power_on(hsdev->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) goto CLEANUP_ALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ap->bmdma_prd = NULL; /* set these so libata doesn't use them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ap->bmdma_prd_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (ap->port_no == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) SATA_DWC_DMACR_TXRXCH_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /* Clear any error bits before libata starts issuing commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) clear_serror(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ap->private_data = hsdevp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dev_dbg(ap->dev, "%s: done\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) CLEANUP_ALLOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) kfree(hsdevp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) CLEANUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static void sata_dwc_port_stop(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) dmaengine_terminate_sync(hsdevp->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) dma_release_channel(hsdevp->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) phy_power_off(hsdev->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) kfree(hsdevp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) ap->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * Function : sata_dwc_exec_command_by_tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * Return value : None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * This function keeps track of individual command tag ids and calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * ata_exec_command in libata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct ata_taskfile *tf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) u8 tag, u32 cmd_issued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ata_get_cmd_descript(tf->command), tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) hsdevp->cmd_issued[tag] = cmd_issued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * Clear SError before executing a new command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * sata_dwc_scr_write and read can not be used here. Clearing the PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * managed SError register for the disk needs to be done before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * task file is loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) clear_serror(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ata_sff_exec_command(ap, tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) SATA_DWC_CMD_ISSUED_PEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) u8 tag = qc->hw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (ata_is_ncq(qc->tf.protocol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) __func__, qc->ap->link.sactive, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) sata_dwc_bmdma_setup_by_tag(qc, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int start_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) int dir = qc->dma_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) start_dma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (dir == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) dev_err(ap->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) "%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) __func__, hsdevp->cmd_issued[tag], tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) start_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) dev_dbg(ap->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) __func__, qc, tag, qc->tf.command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) get_dma_dir_descript(qc->dma_dir), start_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) sata_dwc_tf_dump(ap, &qc->tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (start_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) sata_dwc_scr_read(&ap->link, SCR_ERROR, ®);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (reg & SATA_DWC_SERROR_ERR_BITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) __func__, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (dir == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) SATA_DWC_DMACR_TXCHEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) SATA_DWC_DMACR_RXCHEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* Enable AHB DMA transfer on the specified channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dma_async_issue_pending(hsdevp->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) u8 tag = qc->hw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (ata_is_ncq(qc->tf.protocol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) __func__, qc->ap->link.sactive, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) dev_dbg(qc->ap->dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) sata_dwc_bmdma_start_by_tag(qc, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) u32 sactive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) u8 tag = qc->hw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) #ifdef DEBUG_NCQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (qc->hw_tag > 0 || ap->link.sactive > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) dev_info(ap->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) __func__, ap->print_id, qc->tf.command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ata_get_cmd_descript(qc->tf.command),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) qc->hw_tag, get_prot_descript(qc->tf.protocol),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) ap->link.active_tag, ap->link.sactive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (!ata_is_ncq(qc->tf.protocol))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (ata_is_dma(qc->tf.protocol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (!hsdevp->desc[tag])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return AC_ERR_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) hsdevp->desc[tag] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (ata_is_ncq(qc->tf.protocol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) sactive |= (0x00000001 << tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) dev_dbg(qc->ap->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) __func__, tag, qc->ap->link.sactive, sactive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) ap->ops->sff_tf_load(ap, &qc->tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) SATA_DWC_CMD_ISSUED_PEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return ata_bmdma_qc_issue(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static void sata_dwc_error_handler(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ata_sff_error_handler(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ret = sata_sff_hardreset(link, class, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) sata_dwc_enable_interrupts(hsdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /* Reconfigure the DMA control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) SATA_DWC_DMACR_TXRXCH_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* Reconfigure the DMA Burst Transaction Size register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /* SATA DWC is master only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * scsi mid-layer and libata interface structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static struct scsi_host_template sata_dwc_sht = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ATA_NCQ_SHT(DRV_NAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * test-only: Currently this driver doesn't handle NCQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * correctly. We enable NCQ but set the queue depth to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * max of 1. This will get fixed in in a future release.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) .sg_tablesize = LIBATA_MAX_PRD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* .can_queue = ATA_MAX_QUEUE, */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * Make sure a LLI block is not created that will span 8K max FIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * boundary. If the block spans such a FIS boundary, there is a chance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * that a DMA burst will cross that boundary -- this results in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * error in the host controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) .dma_boundary = 0x1fff /* ATA_DMA_BOUNDARY */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static struct ata_port_operations sata_dwc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) .inherits = &ata_sff_port_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) .error_handler = sata_dwc_error_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) .hardreset = sata_dwc_hardreset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) .qc_issue = sata_dwc_qc_issue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) .scr_read = sata_dwc_scr_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) .scr_write = sata_dwc_scr_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) .port_start = sata_dwc_port_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) .port_stop = sata_dwc_port_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) .sff_dev_select = sata_dwc_dev_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) .bmdma_setup = sata_dwc_bmdma_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) .bmdma_start = sata_dwc_bmdma_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static const struct ata_port_info sata_dwc_port_info[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) .pio_mask = ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) .udma_mask = ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) .port_ops = &sata_dwc_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static int sata_dwc_probe(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct sata_dwc_device *hsdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) u32 idr, versionr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) char *ver = (char *)&versionr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct ata_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct ata_port_info pi = sata_dwc_port_info[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) const struct ata_port_info *ppi[] = { &pi, NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct device_node *np = ofdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /* Allocate DWC SATA device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (!host || !hsdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) host->private_data = hsdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /* Ioremap SATA registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) base = devm_ioremap_resource(&ofdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (IS_ERR(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* Synopsys DWC SATA specific Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* Setup port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) host->ports[0]->ioaddr.cmd_addr = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) sata_dwc_setup_port(&host->ports[0]->ioaddr, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* Read the ID and Version Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) idr, ver[0], ver[1], ver[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /* Save dev for later use in dev_xxx() routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) hsdev->dev = &ofdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* Enable SATA Interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) sata_dwc_enable_interrupts(hsdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /* Get SATA interrupt number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) irq = irq_of_parse_and_map(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (irq == NO_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) dev_err(&ofdev->dev, "no SATA DMA irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #ifdef CONFIG_SATA_DWC_OLD_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!of_find_property(np, "dmas", NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) err = sata_dwc_dma_init_old(ofdev, hsdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (IS_ERR(hsdev->phy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return PTR_ERR(hsdev->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) err = phy_init(hsdev->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * Now, register with libATA core, this will also initiate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * device discovery process, invoking our port_start() handler &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * error_handler() to execute a dummy Softreset EH session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) dev_err(&ofdev->dev, "failed to activate host");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) error_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) phy_exit(hsdev->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static int sata_dwc_remove(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) struct device *dev = &ofdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) struct ata_host *host = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct sata_dwc_device *hsdev = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ata_host_detach(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) phy_exit(hsdev->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) #ifdef CONFIG_SATA_DWC_OLD_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /* Free SATA DMA resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) sata_dwc_dma_exit_old(hsdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) dev_dbg(&ofdev->dev, "done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static const struct of_device_id sata_dwc_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) { .compatible = "amcc,sata-460ex", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) MODULE_DEVICE_TABLE(of, sata_dwc_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static struct platform_driver sata_dwc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) .of_match_table = sata_dwc_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) .probe = sata_dwc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) .remove = sata_dwc_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) module_platform_driver(sata_dwc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) MODULE_VERSION(DRV_VERSION);