/*
 * pata_hpt3x3 - HPT3x3 driver
 * (c) Copyright 2005-2006 Red Hat
 *
 * Was pata_hpt34x but the naming was confusing as it supported the
 * 343 and 363 so it has been renamed.
 *
 * Based on:
 *	linux/drivers/ide/pci/hpt34x.c	Version 0.40	Sept 10, 2002
 *	Copyright (C) 1998-2000	Andre Hedrick <andre@linux-ide.org>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"pata_hpt3x3"
#define DRV_VERSION	"0.6.1"

/**
 * hpt3x3_set_piomode - PIO setup
 * @ap: ATA interface
 * @adev: device on the interface
 *
 * Set our PIO requirements. This is fairly simple on the HPT3x3 as
 * all we have to do is clear the MWDMA and UDMA bits then load the
 * mode number.
 */

static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 r1, r2;
	int dn = 2 * ap->port_no + adev->devno;
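	/* dn: 0/1 = primary master/slave, 2/3 = secondary master/slave */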

	pci_read_config_dword(pdev, 0x44, &r1);
	pci_read_config_dword(pdev, 0x48, &r2);
	/* Load the PIO timing number */
	r1 &= ~(7 << (3 * dn));
	r1 |= (adev->pio_mode - XFER_PIO_0) << (3 * dn);
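	/* In 0x48, bit dn enables UDMA and bit (dn + 4) enables MWDMA for this device */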
	r2 &= ~(0x11 << dn);	/* Clear MWDMA and UDMA bits */

	pci_write_config_dword(pdev, 0x44, r1);
	pci_write_config_dword(pdev, 0x48, r2);
}

#if defined(CONFIG_PATA_HPT3X3_DMA)
/**
 * hpt3x3_set_dmamode - DMA timing setup
 * @ap: ATA interface
 * @adev: Device being configured
 *
 * Set up the channel for MWDMA or UDMA modes. Much the same as with
 * PIO, load the mode number and then set the MWDMA or UDMA flag.
 *
 * 0x44 : bits 0-2 primary master mode, bits 3-5 primary slave mode,
 *        and so on for the secondary channel
 * 0x48 : bit 4 MWDMA enable / bit 0 UDMA enable for the primary master,
 *        bit 5 / bit 1 for the primary slave, and so on
 */

static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 r1, r2;
	int dn = 2 * ap->port_no + adev->devno;
	int mode_num = adev->dma_mode & 0x0F;
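	/* The low nibble of the XFER_* value is the mode number
	   (XFER_MW_DMA_0 is 0x20, XFER_UDMA_0 is 0x40) */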

	pci_read_config_dword(pdev, 0x44, &r1);
	pci_read_config_dword(pdev, 0x48, &r2);
	/* Load the timing number */
	r1 &= ~(7 << (3 * dn));
	r1 |= (mode_num << (3 * dn));
	r2 &= ~(0x11 << dn);	/* Clear MWDMA and UDMA bits */

	if (adev->dma_mode >= XFER_UDMA_0)
		r2 |= (0x01 << dn);	/* Ultra mode */
	else
		r2 |= (0x10 << dn);	/* MWDMA */

	pci_write_config_dword(pdev, 0x44, r1);
	pci_write_config_dword(pdev, 0x48, r2);
}

/**
 * hpt3x3_freeze - DMA workaround
 * @ap: port to freeze
 *
 * When freezing an HPT3x3 we must stop any pending DMA before
 * writing to the control register, or the chip will hang.
 */

static void hpt3x3_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);
	ata_sff_dma_pause(ap);
	ata_sff_freeze(ap);
}

/**
 * hpt3x3_bmdma_setup - DMA workaround
 * @qc: Queued command
 *
 * When issuing BMDMA we must first clear the interrupt and error
 * bits in the BMDMA status register in software on this device.
 */

static void hpt3x3_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
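	/* INTR and ERR are write-one-to-clear, so setting them clears stale status */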
	u8 r = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	r |= ATA_DMA_INTR | ATA_DMA_ERR;
	iowrite8(r, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	return ata_bmdma_setup(qc);
}

/**
 * hpt3x3_atapi_dma - ATAPI DMA check
 * @qc: Queued command
 *
 * Just say no - we don't do ATAPI DMA
 */

static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc)
{
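	/* Nonzero tells libata to use PIO for ATAPI commands on this controller */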
	return 1;
}

#endif /* CONFIG_PATA_HPT3X3_DMA */

static struct scsi_host_template hpt3x3_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations hpt3x3_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= hpt3x3_set_piomode,
#if defined(CONFIG_PATA_HPT3X3_DMA)
	.set_dmamode	= hpt3x3_set_dmamode,
	.bmdma_setup	= hpt3x3_bmdma_setup,
	.check_atapi_dma = hpt3x3_atapi_dma,
	.freeze		= hpt3x3_freeze,
#endif
};

/**
 * hpt3x3_init_chipset - chip setup
 * @dev: PCI device
 *
 * Perform the setup required at boot and on resume.
 */

static void hpt3x3_init_chipset(struct pci_dev *dev)
{
	u16 cmd;
	/* Initialize the board */
	pci_write_config_word(dev, 0x80, 0x00);
	/* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_MEMORY)
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
	else
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
}

/**
 * hpt3x3_init_one - Initialise an HPT343/363
 * @pdev: PCI device
 * @id: Entry in match table
 *
 * Perform basic initialisation. We set the device up so we access all
 * ports via BAR4. This is necessary to work around errata.
 */

static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
#if defined(CONFIG_PATA_HPT3X3_DMA)
		/* Further debug needed */
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA2,
#endif
		.port_ops = &hpt3x3_port_ops
	};
	/* Register offsets of taskfiles in BAR4 area */
	static const u8 offset_cmd[2] = { 0x20, 0x28 };
	static const u8 offset_ctl[2] = { 0x36, 0x3E };
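	/* The BMDMA registers also live in BAR4: 0x00 for port 0, 0x08 for port 1 */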
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;
	int i, rc;
	void __iomem *base;

	hpt3x3_init_chipset(pdev);

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;
	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Everything is relative to BAR4 if we set up this way */
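	/* The 1 << 4 mask asks pcim_iomap_regions() to map BAR4 only */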
	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
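	/* BMDMA uses 32-bit bus addresses, hence the 32-bit ATA_DMA_MASK */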
	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	base = host->iomap[4];	/* Bus mastering base */

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = base + offset_cmd[i];
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = base + offset_ctl[i];
		ioaddr->scr_addr = NULL;
		ata_sff_std_ports(ioaddr);
		ioaddr->bmdma_addr = base + 8 * i;

		ata_port_pbar_desc(ap, 4, -1, "ioport");
		ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
	}
	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &hpt3x3_sht);
}

#ifdef CONFIG_PM_SLEEP
static int hpt3x3_reinit_one(struct pci_dev *dev)
{
	struct ata_host *host = pci_get_drvdata(dev);
	int rc;

	rc = ata_pci_device_do_resume(dev);
	if (rc)
		return rc;

	hpt3x3_init_chipset(dev);

	ata_host_resume(host);
	return 0;
}
#endif

static const struct pci_device_id hpt3x3[] = {
	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },

	{ },
};

static struct pci_driver hpt3x3_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= hpt3x3,
	.probe		= hpt3x3_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ata_pci_device_suspend,
	.resume		= hpt3x3_reinit_one,
#endif
};

module_pci_driver(hpt3x3_pci_driver);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, hpt3x3);
MODULE_VERSION(DRV_VERSION);