// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pata_rdc - Driver for later RDC PATA controllers
 *
 * This is actually a driver for hardware meeting
 * INCITS 370-2004 (1510D): ATA Host Adapter Standards
 *
 * Based on ata_piix.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>

#define DRV_NAME	"pata_rdc"
#define DRV_VERSION	"0.01"

struct rdc_host_priv {
	/* IOCFG (PCI config offset 0x54), saved at probe time; used for
	 * cable detection and restored when the device is removed.
	 */
	u32 saved_iocfg;
};

/**
 * rdc_pata_cable_detect - Probe host controller cable detect info
 * @ap: Port for which cable detect info is desired
 *
 * Read 80c cable indicator from ATA PCI device's PCI config
 * register.  This register is normally set by firmware (BIOS).
 *
 * LOCKING:
 * None (inherited from caller).
 */

static int rdc_pata_cable_detect(struct ata_port *ap)
{
	struct rdc_host_priv *hpriv = ap->host->private_data;
	u8 mask;

	/* check BIOS cable detect results */
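	/*
	 * saved_iocfg mirrors the IOCFG register (PCI config 0x54) captured
	 * at probe time.  The mask below picks the two cable indicator bits
	 * for this channel (bits 5:4 for port 0, bits 7:6 for port 1); if
	 * either is set, firmware reported an 80-wire cable.
	 */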
	mask = 0x30 << (2 * ap->port_no);
	if ((hpriv->saved_iocfg & mask) == 0)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

/**
 * rdc_pata_prereset - prereset for PATA host controller
 * @link: Target link
 * @deadline: deadline jiffies for the operation
 *
 * LOCKING:
 * None (inherited from caller).
 */
static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	static const struct pci_bits rdc_enable_bits[] = {
		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
	};
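	/*
	 * Each pci_bits entry is { config offset, byte width, mask, wanted
	 * value }: bit 7 of config byte 0x41/0x43 is treated as the
	 * firmware "channel enabled" flag, and a disabled channel is
	 * skipped below.
	 */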

	if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
		return -ENOENT;
	return ata_sff_prereset(link, deadline);
}

static DEFINE_SPINLOCK(rdc_lock);

/**
 * rdc_set_piomode - Initialize host controller PATA PIO timings
 * @ap: Port whose timings we are configuring
 * @adev: Drive in question
 *
 * Set PIO mode for device, in host controller PCI config space.
 *
 * LOCKING:
 * None (inherited from caller).
 */

static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio = adev->pio_mode - XFER_PIO_0;
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned long flags;
	unsigned int is_slave = (adev->devno != 0);
	unsigned int master_port = ap->port_no ? 0x42 : 0x40;
	unsigned int slave_port = 0x44;
	u16 master_data;
	u8 slave_data;
	u8 udma_enable;
	int control = 0;

	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };
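	/*
	 * Timing register layout assumed below for 0x40/0x42
	 * (PIIX-compatible): bits 13:12 ISP, bits 9:8 RCT, bit 14 SITRE
	 * (separate slave timing), bits 7:4 slave control and bits 3:0
	 * master control, each nibble being TIME, IE, PPE, DTE from the
	 * low bit up.
	 */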

	if (pio >= 2)
		control |= 1;	/* TIME1 enable */
	if (ata_pio_need_iordy(adev))
		control |= 2;	/* IE enable */

	if (adev->class == ATA_DEV_ATA)
		control |= 4;	/* PPE enable */

	spin_lock_irqsave(&rdc_lock, flags);

	/* PIO configuration clears DTE unconditionally.  It will be
	 * programmed in set_dmamode which is guaranteed to be called
	 * after set_piomode if any DMA mode is available.
	 */
	pci_read_config_word(dev, master_port, &master_data);
	if (is_slave) {
		/* clear TIME1|IE1|PPE1|DTE1 */
		master_data &= 0xff0f;
		/* Enable SITRE (separate slave timing register) */
		master_data |= 0x4000;
		/* enable PPE1, IE1 and TIME1 as needed */
		master_data |= (control << 4);
		pci_read_config_byte(dev, slave_port, &slave_data);
		slave_data &= (ap->port_no ? 0x0f : 0xf0);
		/* Load the timing nibble for this slave */
		slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
						<< (ap->port_no ? 4 : 0);
	} else {
		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
		master_data &= 0xccf0;
		/* Enable PPE, IE and TIME as appropriate */
		master_data |= control;
		/* load ISP and RCT */
		master_data |=
			(timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	}
	pci_write_config_word(dev, master_port, master_data);
	if (is_slave)
		pci_write_config_byte(dev, slave_port, slave_data);

	/*
	 * Ensure the UDMA bit is off - it will be turned back on if
	 * UDMA is selected.
	 */
	pci_read_config_byte(dev, 0x48, &udma_enable);
	udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
	pci_write_config_byte(dev, 0x48, udma_enable);

	spin_unlock_irqrestore(&rdc_lock, flags);
}

/**
 * rdc_set_dmamode - Initialize host controller PATA DMA timings
 * @ap: Port whose timings we are configuring
 * @adev: Drive in question
 *
 * Set MWDMA or UDMA mode for device, in host controller PCI config space.
 *
 * LOCKING:
 * None (inherited from caller).
 */

static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned long flags;
	u8 master_port = ap->port_no ? 0x42 : 0x40;
	u16 master_data;
	u8 speed = adev->dma_mode;
	int devid = adev->devno + 2 * ap->port_no;
	u8 udma_enable = 0;

	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };
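	/*
	 * Register usage assumed below (PIIX-style): 0x48 carries one UDMA
	 * enable bit per drive (bit number == devid), 0x4A a 2-bit
	 * cycle-time field per drive at a 4-bit stride, and 0x54 the
	 * 66 MHz (bit devid) and 100 MHz (bit devid + 12) clock selects.
	 */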

	spin_lock_irqsave(&rdc_lock, flags);

	pci_read_config_word(dev, master_port, &master_data);
	pci_read_config_byte(dev, 0x48, &udma_enable);

	if (speed >= XFER_UDMA_0) {
		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
		u16 udma_timing;
		u16 ideconf;
		int u_clock, u_speed;

		/*
		 * UDMA is handled by a combination of clock switching and
		 * selection of dividers.
		 *
		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
		 * except UDMA0 which is 00
		 */
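		/*
		 * For reference, the expressions below therefore yield:
		 * UDMA0 -> 00 @ 33 MHz, UDMA1 -> 01 @ 33 MHz,
		 * UDMA2 -> 10 @ 33 MHz, UDMA3 -> 01 @ 66 MHz,
		 * UDMA4 -> 10 @ 66 MHz, UDMA5 -> 01 @ 100 MHz.
		 */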
		u_speed = min(2 - (udma & 1), udma);
		if (udma == 5)
			u_clock = 0x1000;	/* 100 MHz */
		else if (udma > 2)
			u_clock = 1;		/* 66 MHz */
		else
			u_clock = 0;		/* 33 MHz */

		udma_enable |= (1 << devid);

		/* Load the CT/RP selection */
		pci_read_config_word(dev, 0x4A, &udma_timing);
		udma_timing &= ~(3 << (4 * devid));
		udma_timing |= u_speed << (4 * devid);
		pci_write_config_word(dev, 0x4A, udma_timing);

		/* Select a 33/66/100 MHz clock */
		pci_read_config_word(dev, 0x54, &ideconf);
		ideconf &= ~(0x1001 << devid);
		ideconf |= u_clock << devid;
		pci_write_config_word(dev, 0x54, ideconf);
	} else {
		/*
		 * MWDMA is driven by the PIO timings.  We must also enable
		 * IORDY unconditionally along with TIME1.  PPE has already
		 * been set when the PIO timing was set.
		 */
		unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
		unsigned int control;
		u8 slave_data;
		const unsigned int needed_pio[3] = {
			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
		};
		int pio = needed_pio[mwdma] - XFER_PIO_0;

		control = 3;	/* IORDY|TIME1 */

		/*
		 * If the drive MWDMA is faster than it can do PIO then
		 * we must force PIO into PIO0
		 */
		if (adev->pio_mode < needed_pio[mwdma])
			/* Enable DMA timing only */
			control |= 8;	/* PIO cycles in PIO0 */

		if (adev->devno) {	/* Slave */
			master_data &= 0xFF4F;	/* Mask out IORDY|TIME1|DMAONLY */
			master_data |= control << 4;
			pci_read_config_byte(dev, 0x44, &slave_data);
			slave_data &= (ap->port_no ? 0x0f : 0xf0);
			/* Load the matching timing */
			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
			pci_write_config_byte(dev, 0x44, slave_data);
		} else {	/* Master */
			master_data &= 0xCCF4;	/* Mask out TIME0|IE0|DTE0 and
						   the master timing bits */
			master_data |= control;
			master_data |=
				(timings[pio][0] << 12) |
				(timings[pio][1] << 8);
		}

		udma_enable &= ~(1 << devid);
		pci_write_config_word(dev, master_port, master_data);
	}
	pci_write_config_byte(dev, 0x48, udma_enable);

	spin_unlock_irqrestore(&rdc_lock, flags);
}

static struct ata_port_operations rdc_pata_ops = {
	.inherits = &ata_bmdma32_port_ops,
	.cable_detect = rdc_pata_cable_detect,
	.set_piomode = rdc_set_piomode,
	.set_dmamode = rdc_set_dmamode,
	.prereset = rdc_pata_prereset,
};

static const struct ata_port_info rdc_port_info = {
	.flags = ATA_FLAG_SLAVE_POSS,
	.pio_mask = ATA_PIO4,			/* PIO modes 0..4 */
	.mwdma_mask = ATA_MWDMA12_ONLY,		/* MWDMA modes 1 and 2 only */
	.udma_mask = ATA_UDMA5,			/* UDMA modes 0..5 */
	.port_ops = &rdc_pata_ops,
};

static struct scsi_host_template rdc_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/**
 * rdc_init_one - Register RDC ATA PCI device with kernel services
 * @pdev: PCI device to register
 * @ent: Entry in rdc_pci_tbl matching with @pdev
 *
 * Called from kernel PCI layer.  We enable the device, save the
 * IOCFG register and then hand control over to libata for the rest.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, or -ERRNO value.
 */

static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct ata_port_info port_info[2];
	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
	struct ata_host *host;
	struct rdc_host_priv *hpriv;
	int rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	port_info[0] = rdc_port_info;
	port_info[1] = rdc_port_info;

	/* enable device and prepare host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	/*
	 * Save IOCFG; this will be used for cable detection and for
	 * restoration on detach.
	 */
	pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	host->private_data = hpriv;

	pci_intx(pdev, 1);

	host->flags |= ATA_HOST_PARALLEL_SCAN;

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
}

static void rdc_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct rdc_host_priv *hpriv = host->private_data;

	pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg);

	ata_pci_remove_one(pdev);
}

static const struct pci_device_id rdc_pci_tbl[] = {
	{ PCI_DEVICE(0x17F3, 0x1011), },
	{ PCI_DEVICE(0x17F3, 0x1012), },
	{ }	/* terminate list */
};

static struct pci_driver rdc_pci_driver = {
	.name = DRV_NAME,
	.id_table = rdc_pci_tbl,
	.probe = rdc_init_one,
	.remove = rdc_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend = ata_pci_device_suspend,
	.resume = ata_pci_device_resume,
#endif
};

module_pci_driver(rdc_pci_driver);

MODULE_AUTHOR("Alan Cox (based on ata_piix)");
MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rdc_pci_tbl);
MODULE_VERSION(DRV_VERSION);