// SPDX-License-Identifier: GPL-2.0-only
/*
 * pata_amd.c - AMD PATA for new ATA layer
 * (C) 2005-2006 Red Hat Inc
 *
 * Based on pata-sil680. Errata information is taken from data sheets
 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 * claimed by sata-nv.c.
 *
 * TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 *
 * Documentation publicly available.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.4.1"

/**
 * timing_setup - shared timing computation and load
 * @ap: ATA port being set up
 * @adev: drive being configured
 * @offset: port offset
 * @speed: target speed
 * @clock: clock multiplier (number of times 33MHz for this part)
 *
 * Perform the actual timing setup for Nvidia or AMD PATA devices.
 * The actual devices vary so they all call into this helper function
 * providing the clock multiplier and offset (because AMD and Nvidia put
 * the ports at different locations).
 */
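
/*
 * Worked example (illustrative figures, not taken from a data sheet):
 * with amd_clock = 33333 kHz the base period below computes as
 * T = 1000000000 / 33333 ~= 30000, i.e. one 33 MHz clock (about 30 ns)
 * in the units ata_timing_compute() works with, and UT = T / 2 once the
 * clock multiplier is 2 or more.  The per-drive timing byte written at
 * offset + 0x08 + (3 - dn) packs the result, after clamping to 1..16
 * clocks, as ((active - 1) << 4) | (recover - 1); e.g. an active count
 * of 3 clocks and a recover count of 1 clock yields (2 << 4) | 0 = 0x20.
 */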

static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* KHz. */
	u8 t;

	T = 1000000000 / amd_clock;
	UT = T;
	if (clock >= 2)
		UT = T / 2;

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_err(&pdev->dev, "unknown mode %d\n", speed);
		return;
	}

	if (peer) {
		/* This may be over conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}

	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;

	/*
	 * Now do the setup work
	 */

	/* Configure the address set up timing */
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((clamp_val(at.act8b, 1, 16) - 1) << 4) | (clamp_val(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((clamp_val(at.active, 1, 16) - 1) << 4) | (clamp_val(at.recover, 1, 16) - 1));

	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (clamp_val(at.udma, 2, 5) - 2)) : 0x03;
		break;

	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 2, 10)]) : 0x03;
		break;

	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 10)]) : 0x03;
		break;

	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 15)]) : 0x03;
		break;

	default:
		return;
	}

	/* UDMA timing */
	if (at.udma)
		pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}

/**
 * amd_pre_reset - perform reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.
 */
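
/*
 * Illustration of the layout encoded in the amd_enable_bits[] table
 * below: PCI config byte 0x40 carries the per-port enable bits, with
 * bit 1 (0x02) gating port 0 and bit 0 (0x01) gating port 1, so having
 * both bits set leaves both PATA channels enabled while a cleared bit
 * makes the matching prereset bail out with -ENOENT.
 */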

static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}

/**
 * amd_cable_detect - report cable type
 * @ap: port
 *
 * AMD controller/BIOS setups record the cable type in word 0x42
 */
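
/*
 * Illustration, per the bitmask[] values used below: bits 0-1 of
 * config register 0x42 describe the drives on port 0 and bits 2-3
 * those on port 1.  If either bit for a port is set the BIOS detected
 * an 80-wire cable on that port, otherwise a 40-wire cable is assumed.
 */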

static int amd_cable_detect(struct ata_port *ap)
{
	static const u32 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 * amd_fifo_setup - set the PIO FIFO for ATA/ATAPI
 * @ap: ATA interface
 *
 * Set the PCI fifo for this device according to the devices present
 * on the bus at this point in time. We need to turn the post write buffer
 * off for ATAPI devices as we may need to issue a word sized write to the
 * device as the final I/O.
 */
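
/*
 * Illustration, following the fifobit[] constants below: bits 7-6 of
 * config register 0x41 enable the FIFO/post-write buffer for port 0
 * and bits 5-4 do the same for port 1.  The routine clears a port's
 * bits when an ATAPI device is attached (or on the 7411, whose FIFO
 * is broken) and sets them otherwise.
 */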

static void amd_fifo_setup(struct ata_port *ap)
{
	struct ata_device *adev;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 fifobit[2] = { 0xC0, 0x30 };
	u8 fifo = fifobit[ap->port_no];
	u8 r;

	ata_for_each_dev(adev, &ap->link, ENABLED) {
		if (adev->class == ATA_DEV_ATAPI)
			fifo = 0;
	}
	if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411) /* FIFO is broken */
		fifo = 0;

	/* On the later chips the read prefetch bits become no-op bits */
	pci_read_config_byte(pdev, 0x41, &r);
	r &= ~fifobit[ap->port_no];
	r |= fifo;
	pci_write_config_byte(pdev, 0x41, r);
}

/**
 * amd33_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the AMD registers for PIO mode.
 */
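
/*
 * The amd33/66/100/133 helpers below differ only in the clock
 * multiplier passed as the last argument to timing_setup(), which
 * selects the UDMA translation used in its switch (clock) statement:
 * 1 for the UDMA33-class parts, 2 for UDMA66, 3 for UDMA100 and
 * 4 for UDMA133 chips.
 */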

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	amd_fifo_setup(ap);
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	amd_fifo_setup(ap);
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	amd_fifo_setup(ap);
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	amd_fifo_setup(ap);
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

/**
 * amd33_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the AMD and Nvidia
 * chipset.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}

/* Both host-side and drive-side detection results are worthless on NV
 * PATAs. Ignore them and just follow what BIOS configured. Both the
 * current configuration in PCI config reg and ACPI GTM result are
 * cached during driver attach and are consulted to select transfer
 * mode.
 */
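
/*
 * Layout implied by the shifts and the 0xc0 test below: the dword
 * cached from PCI config register 0x60 at attach time holds one byte
 * per drive, port1/drive1 in bits 7:0, port1/drive0 in bits 15:8,
 * port0/drive1 in bits 23:16 and port0/drive0 in bits 31:24.  A byte
 * whose top two bits are set means the BIOS enabled UDMA, with its
 * low three bits indexing udma_mask_map[] to recover the mode.
 */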
static unsigned long nv_mode_filter(struct ata_device *dev,
				    unsigned long xfer_mask)
{
	static const unsigned int udma_mask_map[] =
		{ ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
		  ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
	struct ata_port *ap = dev->link->ap;
	char acpi_str[32] = "";
	u32 saved_udma, udma;
	const struct ata_acpi_gtm *gtm;
	unsigned long bios_limit = 0, acpi_limit = 0, limit;

	/* find out what BIOS configured */
	udma = saved_udma = (unsigned long)ap->host->private_data;

	if (ap->port_no == 0)
		udma >>= 16;
	if (dev->devno == 0)
		udma >>= 8;

	if ((udma & 0xc0) == 0xc0)
		bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);

	/* consult ACPI GTM too */
	gtm = ata_acpi_init_gtm(ap);
	if (gtm) {
		acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);

		snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
			 gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
	}

	/* be optimistic, EH can take care of things if something goes wrong */
	limit = bios_limit | acpi_limit;

	/* If PIO or DMA isn't configured at all, don't limit. Let EH
	 * handle it.
	 */
	if (!(limit & ATA_MASK_PIO))
		limit |= ATA_MASK_PIO;
	if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
		limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;
	/* PIO4, MWDMA2, UDMA2 should always be supported regardless of
	   cable detection result */
	limit |= ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA2);

	ata_port_dbg(ap, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
		     "BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
		     xfer_mask, limit, xfer_mask & limit, bios_limit,
		     saved_udma, acpi_limit, acpi_str);

	return xfer_mask & limit;
}

/**
 * nv_pre_reset - cable detection
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Perform cable detection. The BIOS stores this in PCI config
 * space for us.
 */
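
/*
 * This mirrors amd_pre_reset() except that, per the nv_enable_bits
 * table below, the Nvidia bridges keep the per-port enable bits in
 * PCI config byte 0x50 (bit 1 for port 0, bit 0 for port 1) rather
 * than 0x40.
 */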

static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}

/**
 * nv100_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the AMD registers for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}

/**
 * nv100_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the AMD and Nvidia
 * chipset.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}

static void nv_host_stop(struct ata_host *host)
{
	u32 udma = (unsigned long)host->private_data;

	/* restore PCI config register 0x60 */
	pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
}

static struct scsi_host_template amd_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static const struct ata_port_operations amd_base_port_ops = {
	.inherits = &ata_bmdma32_port_ops,
	.prereset = amd_pre_reset,
};

static struct ata_port_operations amd33_port_ops = {
	.inherits = &amd_base_port_ops,
	.cable_detect = ata_cable_40wire,
	.set_piomode = amd33_set_piomode,
	.set_dmamode = amd33_set_dmamode,
};

static struct ata_port_operations amd66_port_ops = {
	.inherits = &amd_base_port_ops,
	.cable_detect = ata_cable_unknown,
	.set_piomode = amd66_set_piomode,
	.set_dmamode = amd66_set_dmamode,
};

static struct ata_port_operations amd100_port_ops = {
	.inherits = &amd_base_port_ops,
	.cable_detect = ata_cable_unknown,
	.set_piomode = amd100_set_piomode,
	.set_dmamode = amd100_set_dmamode,
};

static struct ata_port_operations amd133_port_ops = {
	.inherits = &amd_base_port_ops,
	.cable_detect = amd_cable_detect,
	.set_piomode = amd133_set_piomode,
	.set_dmamode = amd133_set_dmamode,
};

static const struct ata_port_operations nv_base_port_ops = {
	.inherits = &ata_bmdma_port_ops,
	.cable_detect = ata_cable_ignore,
	.mode_filter = nv_mode_filter,
	.prereset = nv_pre_reset,
	.host_stop = nv_host_stop,
};

static struct ata_port_operations nv100_port_ops = {
	.inherits = &nv_base_port_ops,
	.set_piomode = nv100_set_piomode,
	.set_dmamode = nv100_set_dmamode,
};

static struct ata_port_operations nv133_port_ops = {
	.inherits = &nv_base_port_ops,
	.set_piomode = nv133_set_piomode,
	.set_dmamode = nv133_set_dmamode,
};

static void amd_clear_fifo(struct pci_dev *pdev)
{
	u8 fifo;
	/* Disable the FIFO; the FIFO logic will re-enable it as
	   appropriate */
	pci_read_config_byte(pdev, 0x41, &fifo);
	fifo &= 0x0F;
	pci_write_config_byte(pdev, 0x41, fifo);
}
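
/*
 * Note on the mask above: clearing the top nibble of register 0x41
 * drops the FIFO enable bits for both ports (0xC0 | 0x30 == 0xF0), so
 * amd_fifo_setup() can later re-enable them per port once the attached
 * devices are known.
 */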

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info[10] = {
		{ /* 0: AMD 7401 - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA2,
			.port_ops = &amd33_port_ops
		},
		{ /* 1: Early AMD7409 - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA4,
			.port_ops = &amd66_port_ops
		},
		{ /* 2: AMD 7409 */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA4,
			.port_ops = &amd66_port_ops
		},
		{ /* 3: AMD 7411 */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &amd100_port_ops
		},
		{ /* 4: AMD 7441 */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &amd100_port_ops
		},
		{ /* 5: AMD 8111 - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA6,
			.port_ops = &amd133_port_ops
		},
		{ /* 6: AMD 8111 UDMA 100 (Serenade) - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &amd133_port_ops
		},
		{ /* 7: Nvidia Nforce */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &nv100_port_ops
		},
		{ /* 8: Nvidia Nforce2 and later - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA6,
			.port_ops = &nv133_port_ops
		},
		{ /* 9: AMD CS5536 (Geode companion) */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &amd100_port_ops
		}
	};
	const struct ata_port_info *ppi[] = { NULL, NULL };
	int type = id->driver_data;
	void *hpriv = NULL;
	u8 fifo;
	int rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && pdev->revision > 0x7)
		type = 2;

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

	/*
	 * Okay, type is determined now. Apply type-specific workarounds.
	 */
	ppi[0] = &info[type];

	if (type < 3)
		ata_pci_bmdma_clear_simplex(pdev);
	if (pdev->vendor == PCI_VENDOR_ID_AMD)
		amd_clear_fifo(pdev);
	/* Cable detection on Nvidia chips doesn't work too well, so
	 * cache the BIOS-programmed UDMA mode.
	 */
	if (type == 7 || type == 8) {
		u32 udma;

		pci_read_config_dword(pdev, 0x60, &udma);
		hpriv = (void *)(unsigned long)udma;
	}

	/* And fire it up */
	return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
}

#ifdef CONFIG_PM_SLEEP
static int amd_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		amd_clear_fifo(pdev);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_bmdma_clear_simplex(pdev);
	}
	ata_host_resume(host);
	return 0;
}
#endif

static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },

	{ },
};

static struct pci_driver amd_pci_driver = {
	.name = DRV_NAME,
	.id_table = amd,
	.probe = amd_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend = ata_pci_device_suspend,
	.resume = amd_reinit_one,
#endif
};

module_pci_driver(amd_pci_driver);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);