// SPDX-License-Identifier: GPL-2.0-only
/*
 * pata_sl82c105.c - SL82C105 PATA for new ATA layer
 * (C) 2005 Red Hat Inc
 * (C) 2011 Bartlomiej Zolnierkiewicz
 *
 * Based in part on linux/drivers/ide/pci/sl82c105.c
 * SL82C105/Winbond 553 IDE driver
 *
 * and in part on the documentation and errata sheet
 *
 * Note: The controller, like many controllers, has shared timings for
 * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
 * in the dma_stop function. Thus we actually don't need a set_dmamode
 * method as the PIO method is always called and will set the right PIO
 * timing parameters.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_sl82c105"
#define DRV_VERSION "0.3.3"

enum {
	/*
	 * SL82C105 PCI config register 0x40 bits.
	 */
	CTRL_IDE_IRQB	= (1 << 30),
	CTRL_IDE_IRQA	= (1 << 28),
	CTRL_LEGIRQ	= (1 << 11),
	CTRL_P1F16	= (1 << 5),
	CTRL_P1EN	= (1 << 4),
	CTRL_P0F16	= (1 << 1),
	CTRL_P0EN	= (1 << 0)
};

/**
 * sl82c105_pre_reset - probe begin
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Set up cable type and use generic probe init
 */

static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits sl82c105_enable_bits[] = {
		{ 0x40, 1, 0x01, 0x01 },
		{ 0x40, 1, 0x10, 0x10 }
	};
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

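	/* Port 1 is probed only if its enable bit is set; port 0 is always probed. */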
	if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no]))
		return -ENOENT;
	return ata_sff_prereset(link, deadline);
}


/**
 * sl82c105_configure_piomode - set chip PIO timing
 * @ap: ATA interface
 * @adev: ATA device
 * @pio: PIO mode
 *
 * Called to do the PIO mode setup. Our timing registers are shared
 * so a configure_dmamode call will undo any work we do here and vice
 * versa
 */

static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static u16 pio_timing[5] = {
		0x50D, 0x407, 0x304, 0x242, 0x240
	};
	u16 dummy;
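	/*
	 * Each drive has its own 16-bit timing word in PCI config space:
	 * 0x44/0x48 for port 0 drives 0/1, 0x4C/0x50 for port 1. pio_timing[]
	 * above supplies the value for PIO modes 0-4.
	 */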
	int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);

	pci_write_config_word(pdev, timing, pio_timing[pio]);
	/* Can we lose this oddity of the old driver? */
	pci_read_config_word(pdev, timing, &dummy);
}

/**
 * sl82c105_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Called to do the PIO mode setup. Our timing registers are shared
 * but we want to set the PIO timing by default.
 */

static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
}

/**
 * sl82c105_configure_dmamode - set DMA mode in chip
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Load DMA cycle times into the chip ready for a DMA transfer
 * to occur.
 */

static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static u16 dma_timing[3] = {
		0x707, 0x201, 0x200
	};
	u16 dummy;
	int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
	int dma = adev->dma_mode - XFER_MW_DMA_0;

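	/*
	 * Overwrite the shared timing word with the MWDMA 0-2 value; the
	 * matching PIO timing is restored by sl82c105_bmdma_stop().
	 */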
	pci_write_config_word(pdev, timing, dma_timing[dma]);
	/* Can we lose this oddity of the old driver? */
	pci_read_config_word(pdev, timing, &dummy);
}

/**
 * sl82c105_reset_engine - Reset the DMA engine
 * @ap: ATA interface
 *
 * The sl82c105 has some serious problems with the DMA engine
 * when transfers don't run as expected or ATAPI is used. The
 * recommended fix is to reset the engine each time it is used,
 * via a chip test register.
 */

static void sl82c105_reset_engine(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u16 val;

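	/* Pulse bit 2 of the chip test register at config offset 0x7E. */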
	pci_read_config_word(pdev, 0x7E, &val);
	pci_write_config_word(pdev, 0x7E, val | 4);
	pci_write_config_word(pdev, 0x7E, val & ~4);
}

/**
 * sl82c105_bmdma_start - DMA engine begin
 * @qc: ATA command
 *
 * Reset the DMA engine before each use, as recommended by the errata
 * document.
 *
 * FIXME: if we switch clock at BMDMA start/end we might get better
 * PIO performance on DMA capable devices.
 */

static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	udelay(100);
	sl82c105_reset_engine(ap);
	udelay(100);

	/* Set the clocks for DMA */
	sl82c105_configure_dmamode(ap, qc->dev);
	/* Activate DMA */
	ata_bmdma_start(qc);
}

/**
 * sl82c105_bmdma_stop - DMA engine stop
 * @qc: ATA command
 *
 * Reset the DMA engine after each use, as recommended by the errata
 * document.
 *
 * This function is also called to turn off DMA when a timeout occurs
 * during DMA operation. In both cases we need to reset the engine,
 * so no actual eng_timeout handler is required.
 *
 * We assume bmdma_stop is always called if bmdma_start was called. If
 * not then we may need to wrap qc_issue.
 */

static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ata_bmdma_stop(qc);
	sl82c105_reset_engine(ap);
	udelay(100);

	/* This redoes the initial setup of the device, restoring the
	   matching PIO timings */
	sl82c105_set_piomode(ap, qc->dev);
}

/**
 * sl82c105_qc_defer - implement serialization
 * @qc: command
 *
 * We must issue one command per host, not per channel, because
 * of the reset bug.
 *
 * Q: is the SCSI host lock sufficient?
 */

static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_host *host = qc->ap->host;
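	/* The other channel of this controller (port index 0 <-> 1). */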
	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
	int rc;

	/* First apply the usual rules */
	rc = ata_std_qc_defer(qc);
	if (rc != 0)
		return rc;

	/* Now apply serialization rules. Only allow a command if the
	   other channel's state machine is idle */
	if (alt && alt->qc_active)
		return ATA_DEFER_PORT;
	return 0;
}

static bool sl82c105_sff_irq_check(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 val, mask = ap->port_no ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;

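	/* Per-port IRQ status is reported in config register 0x40 (CTRL_IDE_IRQA/B). */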
	pci_read_config_dword(pdev, 0x40, &val);

	return val & mask;
}

static struct scsi_host_template sl82c105_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations sl82c105_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.qc_defer	= sl82c105_qc_defer,
	.bmdma_start	= sl82c105_bmdma_start,
	.bmdma_stop	= sl82c105_bmdma_stop,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= sl82c105_set_piomode,
	.prereset	= sl82c105_pre_reset,
	.sff_irq_check	= sl82c105_sff_irq_check,
};

/**
 * sl82c105_bridge_revision - find bridge version
 * @pdev: PCI device for the ATA function
 *
 * Locates the PCI bridge associated with the ATA function and,
 * provided it is a Winbond 553, reports the revision. If it cannot
 * find a revision or the right device it returns -1.
 */

static int sl82c105_bridge_revision(struct pci_dev *pdev)
{
	struct pci_dev *bridge;

	/*
	 * The bridge should be part of the same device, but function 0.
	 */
	bridge = pci_get_slot(pdev->bus,
			      PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!bridge)
		return -1;

	/*
	 * Make sure it is a Winbond 553 and is an ISA bridge.
	 */
	if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
	    bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
	    bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
		pci_dev_put(bridge);
		return -1;
	}
	/*
	 * We need to find function 0's revision, not function 1
	 */
	pci_dev_put(bridge);
	return bridge->revision;
}

static void sl82c105_fixup(struct pci_dev *pdev)
{
	u32 val;

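	/* Set port 0 enable and the port 0/1 F16 bits in config register 0x40. */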
	pci_read_config_dword(pdev, 0x40, &val);
	val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
	pci_write_config_dword(pdev, 0x40, val);
}

static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	static const struct ata_port_info info_dma = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.port_ops = &sl82c105_port_ops
	};
	static const struct ata_port_info info_early = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.port_ops = &sl82c105_port_ops
	};
	/* for now use only the first port */
	const struct ata_port_info *ppi[] = { &info_early,
					      NULL };
	int rev;
	int rc;

	rc = pcim_enable_device(dev);
	if (rc)
		return rc;

	rev = sl82c105_bridge_revision(dev);

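	/* MWDMA2 is advertised only when the companion 83C553 bridge is found
	   and its revision is later than 5; otherwise fall back to PIO only. */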
	if (rev == -1)
		dev_warn(&dev->dev,
			 "pata_sl82c105: Unable to find bridge, disabling DMA\n");
	else if (rev <= 5)
		dev_warn(&dev->dev,
			 "pata_sl82c105: Early bridge revision, no DMA available\n");
	else
		ppi[0] = &info_dma;

	sl82c105_fixup(dev);

	return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
}

#ifdef CONFIG_PM_SLEEP
static int sl82c105_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	sl82c105_fixup(pdev);

	ata_host_resume(host);
	return 0;
}
#endif

static const struct pci_device_id sl82c105[] = {
	{ PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },

	{ },
};

static struct pci_driver sl82c105_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sl82c105,
	.probe		= sl82c105_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ata_pci_device_suspend,
	.resume		= sl82c105_reinit_one,
#endif
};

module_pci_driver(sl82c105_pci_driver);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for SL82C105");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sl82c105);
MODULE_VERSION(DRV_VERSION);