Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_promise.c - Promise SATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Mikael Pettersson
 *  		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware information only available under NDA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"

#define DRV_NAME	"sata_promise"
#define DRV_VERSION	"2.12"

enum {
	PDC_MAX_PORTS		= 4,
	PDC_MMIO_BAR		= 3,
	PDC_MAX_PRD		= LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */

	/* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_FLASH_CTL		= 0x44, /* Flash control register */
	PDC_PCI_CTL		= 0x48, /* PCI control/status reg */
	PDC_SATA_PLUG_CSR	= 0x6C, /* SATA Plug control/status reg */
	PDC2_SATA_PLUG_CSR	= 0x60, /* SATAII Plug control/status reg */
	PDC_TBG_MODE		= 0x41C, /* TBG mode (not SATAII) */
	PDC_SLEW_CTL		= 0x470, /* slew rate control reg (not SATAII) */

	/* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
	PDC_FEATURE		= 0x04, /* Feature/Error reg (per port) */
	PDC_SECTOR_COUNT	= 0x08, /* Sector count reg (per port) */
	PDC_SECTOR_NUMBER	= 0x0C, /* Sector number reg (per port) */
	PDC_CYLINDER_LOW	= 0x10, /* Cylinder low reg (per port) */
	PDC_CYLINDER_HIGH	= 0x14, /* Cylinder high reg (per port) */
	PDC_DEVICE		= 0x18, /* Device/Head reg (per port) */
	PDC_COMMAND		= 0x1C, /* Command/status reg (per port) */
	PDC_ALTSTATUS		= 0x38, /* Alternate-status/device-control reg (per port) */
	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
	PDC_GLOBAL_CTL		= 0x48, /* Global control/status (per port) */
	PDC_CTLSTAT		= 0x60,	/* IDE control and status (per port) */

	/* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
	PDC_SATA_ERROR		= 0x04,
	PDC_PHYMODE4		= 0x14,
	PDC_LINK_LAYER_ERRORS	= 0x6C,
	PDC_FPDMA_CTLSTAT	= 0xD8,
	PDC_INTERNAL_DEBUG_1	= 0xF8,	/* also used for PATA */
	PDC_INTERNAL_DEBUG_2	= 0xFC,	/* also used for PATA */

	/* PDC_FPDMA_CTLSTAT bit definitions */
	PDC_FPDMA_CTLSTAT_RESET			= 1 << 3,
	PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG	= 1 << 10,
	PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG	= 1 << 11,

	/* PDC_GLOBAL_CTL bit definitions */
	PDC_PH_ERR		= (1 <<  8), /* PCI error while loading packet */
	PDC_SH_ERR		= (1 <<  9), /* PCI error while loading S/G table */
	PDC_DH_ERR		= (1 << 10), /* PCI error while loading data */
	PDC2_HTO_ERR		= (1 << 12), /* host bus timeout */
	PDC2_ATA_HBA_ERR	= (1 << 13), /* error during SATA DATA FIS transmission */
	PDC2_ATA_DMA_CNT_ERR	= (1 << 14), /* DMA DATA FIS size differs from S/G count */
	PDC_OVERRUN_ERR		= (1 << 19), /* S/G byte count larger than HD requires */
	PDC_UNDERRUN_ERR	= (1 << 20), /* S/G byte count less than HD requires */
	PDC_DRIVE_ERR		= (1 << 21), /* drive error */
	PDC_PCI_SYS_ERR		= (1 << 22), /* PCI system error */
	PDC1_PCI_PARITY_ERR	= (1 << 23), /* PCI parity error (from SATA150 driver) */
	PDC1_ERR_MASK		= PDC1_PCI_PARITY_ERR,
	PDC2_ERR_MASK		= PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
				  PDC2_ATA_DMA_CNT_ERR,
	PDC_ERR_MASK		= PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
				  PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
				  PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
				  PDC1_ERR_MASK | PDC2_ERR_MASK,

	board_2037x		= 0,	/* FastTrak S150 TX2plus */
	board_2037x_pata	= 1,	/* FastTrak S150 TX2plus PATA port */
	board_20319		= 2,	/* FastTrak S150 TX4 */
	board_20619		= 3,	/* FastTrak TX4000 */
	board_2057x		= 4,	/* SATAII150 Tx2plus */
	board_2057x_pata	= 5,	/* SATAII150 Tx2plus PATA port */
	board_40518		= 6,	/* SATAII150 Tx4 */

	PDC_HAS_PATA		= (1 << 1), /* PDC20375/20575 has PATA */

	/* Sequence counter control registers bit definitions */
	PDC_SEQCNTRL_INT_MASK	= (1 << 5), /* Sequence Interrupt Mask */

	/* Feature register values */
	PDC_FEATURE_ATAPI_PIO	= 0x00, /* ATAPI data xfer by PIO */
	PDC_FEATURE_ATAPI_DMA	= 0x01, /* ATAPI data xfer by DMA */

	/* Device/Head register values */
	PDC_DEVICE_SATA		= 0xE0, /* Device/Head value for SATA devices */

	/* PDC_CTLSTAT bit definitions */
	PDC_DMA_ENABLE		= (1 << 7),
	PDC_IRQ_DISABLE		= (1 << 10),
	PDC_RESET		= (1 << 11), /* HDMA reset */

	PDC_COMMON_FLAGS	= ATA_FLAG_PIO_POLLING,

	/* ap->flags bits */
	PDC_FLAG_GEN_II		= (1 << 24),
	PDC_FLAG_SATA_PATA	= (1 << 25), /* supports SATA + PATA */
	PDC_FLAG_4_PORTS	= (1 << 26), /* 4 ports */
};

struct pdc_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
};

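/*
 * One instance per host.  hard_reset_lock serializes the per-channel
 * hard-reset toggling done through the shared PDC_PCI_CTL register in
 * pdc_hard_reset_port().
 */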
struct pdc_host_priv {
	spinlock_t hard_reset_lock;
};

static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
static void pdc_irq_clear(struct ata_port *ap);
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
static void pdc_freeze(struct ata_port *ap);
static void pdc_sata_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static void pdc_sata_thaw(struct ata_port *ap);
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline);
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_pata_cable_detect(struct ata_port *ap);

static struct scsi_host_template pdc_ata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= PDC_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};

static const struct ata_port_operations pdc_common_ops = {
	.inherits		= &ata_sff_port_ops,

	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc_qc_prep,
	.qc_issue		= pdc_qc_issue,

	.sff_irq_clear		= pdc_irq_clear,
	.lost_interrupt		= ATA_OP_NULL,

	.post_internal_cmd	= pdc_post_internal_cmd,
	.error_handler		= pdc_error_handler,
};

static struct ata_port_operations pdc_sata_ops = {
	.inherits		= &pdc_common_ops,
	.cable_detect		= ata_cable_sata,
	.freeze			= pdc_sata_freeze,
	.thaw			= pdc_sata_thaw,
	.scr_read		= pdc_sata_scr_read,
	.scr_write		= pdc_sata_scr_write,
	.port_start		= pdc_sata_port_start,
	.hardreset		= pdc_sata_hardreset,
};

/* First-generation chips need a more restrictive ->check_atapi_dma op,
   and ->freeze/thaw that ignore the hotplug controls. */
static struct ata_port_operations pdc_old_sata_ops = {
	.inherits		= &pdc_sata_ops,
	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.check_atapi_dma	= pdc_old_sata_check_atapi_dma,
};

static struct ata_port_operations pdc_pata_ops = {
	.inherits		= &pdc_common_ops,
	.cable_detect		= pdc_pata_cable_detect,
	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.port_start		= pdc_common_port_start,
	.softreset		= pdc_pata_softreset,
};

static const struct ata_port_info pdc_port_info[] = {
	[board_2037x] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_SATA_PATA,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_old_sata_ops,
	},

	[board_2037x_pata] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},

	[board_20319] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_old_sata_ops,
	},

	[board_20619] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
				  PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},

	[board_2057x] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_sata_ops,
	},

	[board_2057x_pata] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
				  PDC_FLAG_GEN_II,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},

	[board_40518] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_sata_ops,
	},
};

static const struct pci_device_id pdc_ata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },

	{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
	{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
	{ PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },

	{ PCI_VDEVICE(PROMISE, 0x6629), board_20619 },

	{ }	/* terminate list */
};
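/* The driver_data of each PCI ID entry above is a board_* index that selects
 * the matching pdc_port_info[] template for that device. */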

static struct pci_driver pdc_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_ata_pci_tbl,
	.probe			= pdc_ata_init_one,
	.remove			= ata_pci_remove_one,
};

static int pdc_common_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct pdc_port_priv *pp;
	int rc;

	/* we use the same prd table as bmdma, allocate it */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

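	/* 128-byte coherent DMA buffer holding the controller's command
	 * packet, filled in later by pdc_pkt_header()/pdc_atapi_pkt(). */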
	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	ap->private_data = pp;

	return 0;
}

static int pdc_sata_port_start(struct ata_port *ap)
{
	int rc;

	rc = pdc_common_port_start(ap);
	if (rc)
		return rc;

	/* fix up PHYMODE4 align timing */
	if (ap->flags & PDC_FLAG_GEN_II) {
		void __iomem *sata_mmio = ap->ioaddr.scr_addr;
		unsigned int tmp;

		tmp = readl(sata_mmio + PDC_PHYMODE4);
		tmp = (tmp & ~3) | 1;	/* set bits 1:0 = 0:1 */
		writel(tmp, sata_mmio + PDC_PHYMODE4);
	}

	return 0;
}

static void pdc_fpdma_clear_interrupt_flag(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u32 tmp;

	tmp = readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp |= PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG;
	tmp |= PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG;

	/* It's not allowed to write to the entire FPDMA_CTLSTAT register
	   when NCQ is running. So do a byte-sized write to bits 10 and 11. */
	writeb(tmp >> 8, sata_mmio + PDC_FPDMA_CTLSTAT + 1);
	readb(sata_mmio + PDC_FPDMA_CTLSTAT + 1); /* flush */
}

static void pdc_fpdma_reset(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u8 tmp;

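	/* Pulse the FPDMA reset bit for ~100 us, then clear the pending
	 * DMA-setup / set-device-bits interrupt flags. */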
	tmp = (u8)readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp &= 0x7F;
	tmp |= PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */
	udelay(100);
	tmp &= ~PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */

	pdc_fpdma_clear_interrupt_flag(ap);
}

static void pdc_not_at_command_packet_phase(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	unsigned int i;
	u32 tmp;

	/* check not at ASIC packet command phase */
	for (i = 0; i < 100; ++i) {
		writel(0, sata_mmio + PDC_INTERNAL_DEBUG_1);
		tmp = readl(sata_mmio + PDC_INTERNAL_DEBUG_2);
		if ((tmp & 0xF) != 1)
			break;
		udelay(100);
	}
}

static void pdc_clear_internal_debug_record_error_register(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;

	writel(0xffffffff, sata_mmio + PDC_SATA_ERROR);
	writel(0xffff0000, sata_mmio + PDC_LINK_LAYER_ERRORS);
}

static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	if (ap->flags & PDC_FLAG_GEN_II)
		pdc_not_at_command_packet_phase(ap);

	tmp = readl(ata_ctlstat_mmio);
	tmp |= PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);

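	/* Poll for up to ~1 ms until the reset bit reads back as set,
	 * re-writing it on each iteration in case it has not latched yet. */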
	for (i = 11; i > 0; i--) {
		tmp = readl(ata_ctlstat_mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, ata_ctlstat_mmio);
	}

	tmp &= ~PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);
	readl(ata_ctlstat_mmio);	/* flush */

	if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) {
		pdc_fpdma_reset(ap);
		pdc_clear_internal_debug_record_error_register(ap);
	}
}

static int pdc_pata_cable_detect(struct ata_port *ap)
{
	u8 tmp;
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;

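	/* Bit 0 of PDC_CTLSTAT byte 3 (bit 24 of the register) reads as set
	 * when a 40-wire cable is attached. */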
	tmp = readb(ata_mmio + PDC_CTLSTAT + 3);
	if (tmp & 0x01)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

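/* SStatus, SError and SControl are consecutive 32-bit registers starting at
 * ap->ioaddr.scr_addr, so the SCR index maps directly to a word offset. */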
static int pdc_sata_scr_read(struct ata_link *link,
			     unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int pdc_sata_scr_write(struct ata_link *link,
			      unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	dma_addr_t sg_table = ap->bmdma_prd_dma;
	unsigned int cdb_len = qc->dev->cdb_len;
	u8 *cdb = qc->cdb;
	struct pdc_port_priv *pp = ap->private_data;
	u8 *buf = pp->pkt;
	__le32 *buf32 = (__le32 *) buf;
	unsigned int dev_sel, feature;

	/* set control bits (byte 0), zero delay seq id (byte 3),
	 * and seq id (byte 2)
	 */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_DMA:
		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
			buf32[0] = cpu_to_le32(PDC_PKT_READ);
		else
			buf32[0] = 0;
		break;
	case ATAPI_PROT_NODATA:
		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
		break;
	default:
		BUG();
		break;
	}
	buf32[1] = cpu_to_le32(sg_table);	/* S/G table addr */
	buf32[2] = 0;				/* no next-packet */

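	/* The remainder of the packet is a sequence of opcodes of the form
	 * ((value count) << 5) | taskfile register index, each followed by
	 * the value(s) to write; PDC_LAST_REG marks the final entry. */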
	/* select drive */
	if (sata_scr_valid(&ap->link))
		dev_sel = PDC_DEVICE_SATA;
	else
		dev_sel = qc->tf.device;

	buf[12] = (1 << 5) | ATA_REG_DEVICE;
	buf[13] = dev_sel;
	buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
	buf[15] = dev_sel; /* once more, waiting for BSY to clear */

	buf[16] = (1 << 5) | ATA_REG_NSECT;
	buf[17] = qc->tf.nsect;
	buf[18] = (1 << 5) | ATA_REG_LBAL;
	buf[19] = qc->tf.lbal;

	/* set feature and byte counter registers */
	if (qc->tf.protocol != ATAPI_PROT_DMA)
		feature = PDC_FEATURE_ATAPI_PIO;
	else
		feature = PDC_FEATURE_ATAPI_DMA;

	buf[20] = (1 << 5) | ATA_REG_FEATURE;
	buf[21] = feature;
	buf[22] = (1 << 5) | ATA_REG_BYTEL;
	buf[23] = qc->tf.lbam;
	buf[24] = (1 << 5) | ATA_REG_BYTEH;
	buf[25] = qc->tf.lbah;

	/* send ATAPI packet command 0xA0 */
	buf[26] = (1 << 5) | ATA_REG_CMD;
	buf[27] = qc->tf.command;

	/* select drive and check DRQ */
	buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
	buf[29] = dev_sel;

	/* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
	BUG_ON(cdb_len & ~0x1E);

	/* append the CDB as the final part */
	buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
	memcpy(buf+31, cdb, cdb_len);
}

/**
 *	pdc_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *	Make sure hardware does not choke on it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void pdc_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	const u32 SG_COUNT_ASIC_BUG = 41*4;
	unsigned int si, idx;
	u32 len;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

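	/* Work around the ASIC PRD bug: if the final PRD entry covers more
	 * than SG_COUNT_ASIC_BUG (164) bytes, split it so that the very last
	 * entry is exactly 164 bytes long. */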
	len = le32_to_cpu(prd[idx - 1].flags_len);

	if (len > SG_COUNT_ASIC_BUG) {
		u32 addr;

		VPRINTK("Splitting last PRD.\n");

		addr = le32_to_cpu(prd[idx - 1].addr);
		prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);

		addr = addr + len - SG_COUNT_ASIC_BUG;
		len = SG_COUNT_ASIC_BUG;
		prd[idx].addr = cpu_to_le32(addr);
		prd[idx].flags_len = cpu_to_le32(len);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

		idx++;
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
{
	struct pdc_port_priv *pp = qc->ap->private_data;
	unsigned int i;

	VPRINTK("ENTER\n");

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc_fill_sg(qc);
		fallthrough;
	case ATA_PROT_NODATA:
		i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
				   qc->dev->devno, pp->pkt);
		if (qc->tf.flags & ATA_TFLAG_LBA48)
			i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
		else
			i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
		pdc_pkt_footer(&qc->tf, pp->pkt, i);
		break;
	case ATAPI_PROT_PIO:
		pdc_fill_sg(qc);
		break;
	case ATAPI_PROT_DMA:
		pdc_fill_sg(qc);
		fallthrough;
	case ATAPI_PROT_NODATA:
		pdc_atapi_pkt(qc);
		break;
	default:
		break;
	}

	return AC_ERR_OK;
}

static int pdc_is_sataii_tx4(unsigned long flags)
{
	const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;
	return (flags & mask) == mask;
}

static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
					  int is_sataii_tx4)
{
	static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
	return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
}

static unsigned int pdc_sata_nr_ports(const struct ata_port *ap)
{
	return (ap->flags & PDC_FLAG_4_PORTS) ? 4 : 2;
}

static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
{
	const struct ata_host *host = ap->host;
	unsigned int nr_ports = pdc_sata_nr_ports(ap);
	unsigned int i;

	for (i = 0; i < nr_ports && host->ports[i] != ap; ++i)
		;
	BUG_ON(i >= nr_ports);
	return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags));
}

static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	tmp = readl(ata_mmio + PDC_CTLSTAT);
	tmp |= PDC_IRQ_DISABLE;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, ata_mmio + PDC_CTLSTAT);
	readl(ata_mmio + PDC_CTLSTAT); /* flush */
}

static void pdc_sata_freeze(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
	u32 hotplug_status;

	/* Disable hotplug events on this port.
	 *
	 * Locking:
	 * 1) hotplug register accesses must be serialised via host->lock
	 * 2) ap->lock == &ap->host->lock
	 * 3) ->freeze() and ->thaw() are called with ap->lock held
	 */
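	/* Upper halfword of the plug CSR: per-engine hotplug event mask bits
	 * (set here to mask events for this port).  Lower halfword: latched
	 * event status, cleared again in pdc_sata_thaw(). */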
	hotplug_status = readl(host_mmio + hotplug_offset);
	hotplug_status |= 0x11 << (ata_no + 16);
	writel(hotplug_status, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset); /* flush */

	pdc_freeze(ap);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) static void pdc_thaw(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	/* clear IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	readl(ata_mmio + PDC_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	/* turn IRQ back on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	tmp = readl(ata_mmio + PDC_CTLSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	tmp &= ~PDC_IRQ_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	writel(tmp, ata_mmio + PDC_CTLSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	readl(ata_mmio + PDC_CTLSTAT); /* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) static void pdc_sata_thaw(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	struct ata_host *host = ap->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	u32 hotplug_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	pdc_thaw(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	/* Enable hotplug events on this port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	 * Locking: see pdc_sata_freeze().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	 */
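	/* Writing 1 back to the event bits in the low byte clears any
	 * latched plug/unplug event for this port (the flags appear to be
	 * write-one-to-clear, as in pdc_host_init()), while clearing the
	 * matching bits at 23:16 unmasks the interrupts again.
	 */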
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	hotplug_status = readl(host_mmio + hotplug_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	hotplug_status |= 0x11 << ata_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	hotplug_status &= ~(0x11 << (ata_no + 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	writel(hotplug_status, host_mmio + hotplug_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	readl(host_mmio + hotplug_offset); /* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			      unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	pdc_reset_port(link->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	return ata_sff_softreset(link, class, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) static unsigned int pdc_ata_port_to_ata_no(const struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	/* ata_mmio == host_mmio + 0x200 + ata_no * 0x80 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	return (ata_mmio - host_mmio - 0x200) / 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) static void pdc_hard_reset_port(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	struct pdc_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	u8 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	spin_lock(&hpriv->hard_reset_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
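	/* Byte 1 of PDC_PCI_CTL presumably carries per-port reset control:
	 * dropping bit (4 + ata_no) and raising it again after 100us pulses
	 * a hard reset on this port only.  hard_reset_lock serialises the
	 * read-modify-write against the other ports.
	 */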
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	tmp = readb(pcictl_b1_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	tmp &= ~(0x10 << ata_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	writeb(tmp, pcictl_b1_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	readb(pcictl_b1_mmio); /* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	tmp |= (0x10 << ata_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	writeb(tmp, pcictl_b1_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	readb(pcictl_b1_mmio); /* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	spin_unlock(&hpriv->hard_reset_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			      unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	if (link->ap->flags & PDC_FLAG_GEN_II)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		pdc_not_at_command_packet_phase(link->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	/* hotplug IRQs should have been masked by pdc_sata_freeze() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	pdc_hard_reset_port(link->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	pdc_reset_port(link->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	/* sata_promise can't reliably acquire the first D2H Reg FIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	 * after hardreset.  Do non-waiting hardreset and request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	 * follow-up SRST.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	return sata_std_hardreset(link, class, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) static void pdc_error_handler(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	if (!(ap->pflags & ATA_PFLAG_FROZEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		pdc_reset_port(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	ata_sff_error_handler(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	/* make DMA engine forget about the failed command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	if (qc->flags & ATA_QCFLAG_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		pdc_reset_port(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			   u32 port_status, u32 err_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	struct ata_eh_info *ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	unsigned int ac_err_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status);
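	/* err_mask was already restricted by pdc_host_intr() to the error
	 * bits valid for this chip generation */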
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	port_status &= err_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (port_status & PDC_DRIVE_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		ac_err_mask |= AC_ERR_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		ac_err_mask |= AC_ERR_OTHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		ac_err_mask |= AC_ERR_ATA_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			   | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		ac_err_mask |= AC_ERR_HOST_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	if (sata_scr_valid(&ap->link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		u32 serror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		ehi->serror |= serror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	qc->err_mask |= ac_err_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	pdc_reset_port(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	ata_port_abort(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) static unsigned int pdc_host_intr(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 				  struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	u32 port_status, err_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
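	/* start from every known error bit and drop the ones defined only
	 * for the other chip generation */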
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	err_mask = PDC_ERR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	if (ap->flags & PDC_FLAG_GEN_II)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		err_mask &= ~PDC1_ERR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		err_mask &= ~PDC2_ERR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	if (unlikely(port_status & err_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		pdc_error_intr(ap, qc, port_status, err_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	switch (qc->tf.protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	case ATA_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	case ATA_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	case ATAPI_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	case ATAPI_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		ata_qc_complete(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		ap->stats.idle_irq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	return handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) static void pdc_irq_clear(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	readl(ata_mmio + PDC_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	struct ata_host *host = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	struct ata_port *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	u32 mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	unsigned int i, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	void __iomem *host_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	unsigned int hotplug_offset, ata_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	u32 hotplug_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	int is_sataii_tx4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	VPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	if (!host || !host->iomap[PDC_MMIO_BAR]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		VPRINTK("QUICK EXIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	host_mmio = host->iomap[PDC_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	/* read and clear hotplug flags for all ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	if (host->ports[0]->flags & PDC_FLAG_GEN_II) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		hotplug_offset = PDC2_SATA_PLUG_CSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		hotplug_status = readl(host_mmio + hotplug_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		if (hotplug_status & 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		hotplug_status &= 0xff;	/* clear uninteresting bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		hotplug_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	/* reading should also clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	mask = readl(host_mmio + PDC_INT_SEQMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
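	/* an all-ones readback usually means the chip did not respond
	 * (e.g. it is gone or powered down), so treat it as not ours */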
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (mask == 0xffffffff && hotplug_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		VPRINTK("QUICK EXIT 2\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		goto done_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	mask &= 0xffff;		/* only 16 SEQIDs possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if (mask == 0 && hotplug_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		VPRINTK("QUICK EXIT 3\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		goto done_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	writel(mask, host_mmio + PDC_INT_SEQMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	for (i = 0; i < host->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		VPRINTK("port %u\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		/* check for a plug or unplug event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		tmp = hotplug_status & (0x11 << ata_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		if (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			struct ata_eh_info *ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			ata_ehi_hotplugged(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			++handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		/* check for a packet interrupt */
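		/* SEQ IDs are assigned as port_no + 1 in pdc_packet_start(),
		 * hence bit (i + 1) of the SEQ mask */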
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		tmp = mask & (1 << (i + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		if (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 				handled += pdc_host_intr(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	VPRINTK("EXIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) done_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) static void pdc_packet_start(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	struct pdc_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	unsigned int port_no = ap->port_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	u8 seq = (u8) (port_no + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	VPRINTK("ENTER, ap %p\n", ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	writel(0x00000001, host_mmio + (seq * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	readl(host_mmio + (seq * 4));	/* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
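	/* byte 2 of the packet carries the SEQ ID, so completion of this
	 * command is reported as bit 'seq' in PDC_INT_SEQMASK */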
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	pp->pkt[2] = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	wmb();			/* flush PRD, pkt writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
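	/* Route DMA and suitable non-polled NODATA commands through the
	 * Promise packet engine; polled, CDB-interrupt and PIO cases fall
	 * back to the common SFF issue path. */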
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	switch (qc->tf.protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	case ATAPI_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	case ATA_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		if (qc->tf.flags & ATA_TFLAG_POLLING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	case ATAPI_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	case ATA_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		pdc_packet_start(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	return ata_sff_qc_issue(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	ata_sff_tf_load(ap, tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static void pdc_exec_command_mmio(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 				  const struct ata_taskfile *tf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	ata_sff_exec_command(ap, tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	u8 *scsicmd = qc->scsicmd->cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	int pio = 1; /* atapi dma off by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	/* Whitelist commands that may use DMA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	switch (scsicmd[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	case WRITE_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	case WRITE_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	case WRITE_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	case READ_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	case READ_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	case READ_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	case 0xad: /* READ_DVD_STRUCTURE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	case 0xbe: /* READ_CD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		pio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	if (scsicmd[0] == WRITE_10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		unsigned int lba =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			(scsicmd[2] << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			(scsicmd[3] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			(scsicmd[4] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			scsicmd[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		if (lba >= 0xFFFF4FA2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			pio = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	return pio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	/* First generation chips cannot use ATAPI DMA on SATA ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static void pdc_ata_setup_port(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			       void __iomem *base, void __iomem *scr_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
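	/* the shadow taskfile registers are spaced 4 bytes apart in the
	 * Promise MMIO window rather than the usual byte-wide layout */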
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	ap->ioaddr.cmd_addr		= base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	ap->ioaddr.data_addr		= base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	ap->ioaddr.feature_addr		=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	ap->ioaddr.error_addr		= base + 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	ap->ioaddr.nsect_addr		= base + 0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	ap->ioaddr.lbal_addr		= base + 0xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	ap->ioaddr.lbam_addr		= base + 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	ap->ioaddr.lbah_addr		= base + 0x14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	ap->ioaddr.device_addr		= base + 0x18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	ap->ioaddr.command_addr		=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	ap->ioaddr.status_addr		= base + 0x1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	ap->ioaddr.altstatus_addr	=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	ap->ioaddr.ctl_addr		= base + 0x38;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	ap->ioaddr.scr_addr		= scr_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static void pdc_host_init(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	int hotplug_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	if (is_gen2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		hotplug_offset = PDC2_SATA_PLUG_CSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		hotplug_offset = PDC_SATA_PLUG_CSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	 * Except for the hotplug stuff, this is voodoo from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	 * Promise driver.  Label this entire section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	 * "TODO: figure out why we do this"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	/* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	tmp = readl(host_mmio + PDC_FLASH_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	tmp |= 0x02000;	/* bit 13 (enable bmr burst) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	if (!is_gen2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		tmp |= 0x10000;	/* bit 16 (fifo threshold at 8 dw) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	writel(tmp, host_mmio + PDC_FLASH_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	/* clear plug/unplug flags for all ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	tmp = readl(host_mmio + hotplug_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	writel(tmp | 0xff, host_mmio + hotplug_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	tmp = readl(host_mmio + hotplug_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	if (is_gen2)	/* unmask plug/unplug ints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	else		/* mask plug/unplug ints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		writel(tmp | 0xff0000, host_mmio + hotplug_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	/* don't initialise TBG or SLEW on 2nd generation chips */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	if (is_gen2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	/* reduce TBG clock to 133 MHz. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	tmp = readl(host_mmio + PDC_TBG_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	tmp &= ~0x30000; /* clear bits 17:16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	tmp |= 0x10000;  /* set bit 17:16 = 0:1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	writel(tmp, host_mmio + PDC_TBG_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	readl(host_mmio + PDC_TBG_MODE);	/* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	/* adjust slew rate control register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	tmp = readl(host_mmio + PDC_SLEW_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	tmp &= 0xFFFFF03F; /* clear bits 11:6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	tmp |= 0x00000900;  /* set bits 11:9 = 100b, bits 8:6 = 100b */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	writel(tmp, host_mmio + PDC_SLEW_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static int pdc_ata_init_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			    const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	const struct ata_port_info *ppi[PDC_MAX_PORTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	struct ata_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	struct pdc_host_priv *hpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	void __iomem *host_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	int n_ports, i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	int is_sataii_tx4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	ata_print_version_once(&pdev->dev, DRV_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	/* enable and acquire resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	rc = pcim_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	if (rc == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		pcim_pin_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	/* determine port configuration and setup host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	n_ports = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	if (pi->flags & PDC_FLAG_4_PORTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		n_ports = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	for (i = 0; i < n_ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		ppi[i] = pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
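	/* On combined SATA/PATA chips, bit 7 of the byte after PDC_FLASH_CTL
	 * apparently reports that the PATA port is unusable; when it is
	 * clear, register an extra port using the PATA port_info entry that
	 * follows the SATA one. */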
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (pi->flags & PDC_FLAG_SATA_PATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		if (!(tmp & 0x80))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			ppi[n_ports++] = pi + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		dev_err(&pdev->dev, "failed to allocate host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	if (!hpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	spin_lock_init(&hpriv->hard_reset_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	host->private_data = hpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	host->iomap = pcim_iomap_table(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	for (i = 0; i < host->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		struct ata_port *ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		unsigned int ata_offset = 0x200 + ata_no * 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		unsigned int scr_offset = 0x400 + ata_no * 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	/* initialize adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	pdc_host_init(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	/* start host, request IRQ and attach */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 				 &pdc_ata_sht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) module_pci_driver(pdc_ata_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) MODULE_AUTHOR("Jeff Garzik");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) MODULE_VERSION(DRV_VERSION);