// SPDX-License-Identifier: GPL-2.0-only
/*
 * pata_efar.c - EFAR PIIX clone controller driver
 *
 * (C) 2005 Red Hat
 * (C) 2009-2010 Bartlomiej Zolnierkiewicz
 *
 * Some parts based on ata_piix.c by Jeff Garzik and others.
 *
 * The EFAR is a PIIX4 clone with UDMA66 support. Unlike the later
 * Intel ICH controllers the EFAR widened the UDMA mode register bits
 * and doesn't require the funky clock selection.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>

#define DRV_NAME	"pata_efar"
#define DRV_VERSION	"0.4.5"

/**
 *	efar_pre_reset	-	check port enable bits
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Check that the EFAR port is actually enabled in PCI configuration
 *	space before performing the standard SFF prereset. Returns -ENOENT
 *	if the port is disabled.
 */

static int efar_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits efar_enable_bits[] = {
		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
	};
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

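	/* Is this port enabled in PCI config space? (bit 7 of 0x41/0x43) */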
	if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}

/**
 *	efar_cable_detect	-	check for 40/80 pin
 *	@ap: Port
 *
 *	Perform cable detection for the EFAR ATA interface. This is
 *	different from the PIIX arrangement.
 */

static int efar_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, 0x47, &tmp);
	if (tmp & (2 >> ap->port_no))
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

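/* Serializes updates to the timing registers shared by both ports */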
static DEFINE_SPINLOCK(efar_lock);

/**
 *	efar_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device to program
 *
 *	Set PIO mode for device, in host controller PCI config space.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void efar_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio = adev->pio_mode - XFER_PIO_0;
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned int master_port = ap->port_no ? 0x42 : 0x40;
	unsigned long flags;
	u16 master_data;
	u8 udma_enable;
	int control = 0;

	/*
	 * See Intel Document 298600-004 for the timing programming rules
	 * for PIIX/ICH. The EFAR is a clone, so the rules are very similar.
	 */

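	/* ISP = IORDY sample point, RTC = recovery time, indexed by PIO mode */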
	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	if (pio > 1)
		control |= 1;	/* TIME */
	if (ata_pio_need_iordy(adev))	/* PIO 3/4 require IORDY */
		control |= 2;	/* IE */
	/* Intel specifies that the prefetch/posting is for disk only */
	if (adev->class == ATA_DEV_ATA)
		control |= 4;	/* PPE */

	spin_lock_irqsave(&efar_lock, flags);

	pci_read_config_word(dev, master_port, &master_data);

	/* Set PPE, IE, and TIME as appropriate */
	if (adev->devno == 0) {
		master_data &= 0xCCF0;
		master_data |= control;
		master_data |= (timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	} else {
		int shift = 4 * ap->port_no;
		u8 slave_data;

		master_data &= 0xFF0F;
		master_data |= (control << 4);

		/* Slave timing in separate register */
		pci_read_config_byte(dev, 0x44, &slave_data);
		slave_data &= ap->port_no ? 0x0F : 0xF0;
		slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
		pci_write_config_byte(dev, 0x44, slave_data);
	}

	master_data |= 0x4000;	/* Ensure SITRE is set */
	pci_write_config_word(dev, master_port, master_data);

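	/* Drive is now in PIO mode: clear its UDMA enable bit in reg 0x48 */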
	pci_read_config_byte(dev, 0x48, &udma_enable);
	udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
	pci_write_config_byte(dev, 0x48, udma_enable);
	spin_unlock_irqrestore(&efar_lock, flags);
}

/**
 *	efar_set_dmamode - Initialize host controller PATA DMA timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device to program
 *
 *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void efar_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	u8 master_port = ap->port_no ? 0x42 : 0x40;
	u16 master_data;
	u8 speed = adev->dma_mode;
	int devid = adev->devno + 2 * ap->port_no;
	unsigned long flags;
	u8 udma_enable;

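	/* Same ISP/RTC PIO timing table as used in efar_set_piomode() */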
	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	spin_lock_irqsave(&efar_lock, flags);

	pci_read_config_word(dev, master_port, &master_data);
	pci_read_config_byte(dev, 0x48, &udma_enable);

	if (speed >= XFER_UDMA_0) {
		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
		u16 udma_timing;

		udma_enable |= (1 << devid);

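		/* EFAR packs each drive's mode 4 bits apart in reg 0x4A (wider than PIIX) */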
		/* Load the UDMA mode number */
		pci_read_config_word(dev, 0x4A, &udma_timing);
		udma_timing &= ~(7 << (4 * devid));
		udma_timing |= udma << (4 * devid);
		pci_write_config_word(dev, 0x4A, udma_timing);
	} else {
		/*
		 * MWDMA is driven by the PIO timings. We must also enable
		 * IORDY unconditionally along with TIME1. PPE has already
		 * been set when the PIO timing was set.
		 */
		unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
		unsigned int control;
		u8 slave_data;
		const unsigned int needed_pio[3] = {
			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
		};
		int pio = needed_pio[mwdma] - XFER_PIO_0;

		control = 3;	/* IORDY|TIME1 */

		/*
		 * If the drive's MWDMA mode is faster than its best PIO mode,
		 * we must force the PIO cycles to run at PIO0 timing.
		 */

		if (adev->pio_mode < needed_pio[mwdma])
			/* Enable DMA timing only */
			control |= 8;	/* PIO cycles in PIO0 */

		if (adev->devno) {	/* Slave */
			master_data &= 0xFF4F;	/* Mask out IORDY|TIME1|DMAONLY */
			master_data |= control << 4;
			pci_read_config_byte(dev, 0x44, &slave_data);
			slave_data &= ap->port_no ? 0x0F : 0xF0;
			/* Load the matching timing */
			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
			pci_write_config_byte(dev, 0x44, slave_data);
		} else {	/* Master */
			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
						   and master timing bits */
			master_data |= control;
			master_data |=
				(timings[pio][0] << 12) |
				(timings[pio][1] << 8);
		}
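		/* MWDMA selected: make sure UDMA stays disabled for this drive */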
		udma_enable &= ~(1 << devid);
		pci_write_config_word(dev, master_port, master_data);
	}
	pci_write_config_byte(dev, 0x48, udma_enable);
	spin_unlock_irqrestore(&efar_lock, flags);
}

static struct scsi_host_template efar_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations efar_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= efar_cable_detect,
	.set_piomode	= efar_set_piomode,
	.set_dmamode	= efar_set_dmamode,
	.prereset	= efar_pre_reset,
};

/**
 *	efar_init_one - Register EFAR ATA PCI device with kernel services
 *	@pdev: PCI device to register
 *	@ent: Entry in efar_pci_tbl matching with @pdev
 *
 *	Called from kernel PCI layer.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, or -ERRNO value.
 */

static int efar_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY,
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &efar_ops,
	};
	const struct ata_port_info *ppi[] = { &info, &info };

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
				      ATA_HOST_PARALLEL_SCAN);
}

static const struct pci_device_id efar_pci_tbl[] = {
	{ PCI_VDEVICE(EFAR, 0x9130), },

	{ }	/* terminate list */
};

static struct pci_driver efar_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= efar_pci_tbl,
	.probe		= efar_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};

module_pci_driver(efar_pci_driver);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
MODULE_VERSION(DRV_VERSION);