Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * pata_atiixp.c 	- ATI PATA for new ATA layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *			  (C) 2005 Red Hat Inc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *			  (C) 2009-2010 Bartlomiej Zolnierkiewicz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *  linux/drivers/ide/pci/atiixp.c	Version 0.01-bart2	Feb. 26, 2004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *  Copyright (C) 2003 ATI Inc. <hyu@ati.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *  Copyright (C) 2004 Bartlomiej Zolnierkiewicz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/libata.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/dmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.6"

/*
 * PCI configuration-space register offsets of the ATI IXP IDE function.
 * Width of each register is evident from the pci_read/write accessors
 * used below (TIMING registers are dwords, MODE/CONTROL are words; the
 * UDMA mode register is also read bytewise per channel in cable detect).
 */
enum {
	ATIIXP_IDE_PIO_TIMING	= 0x40,	/* per-drive PIO cycle timing bytes */
	ATIIXP_IDE_MWDMA_TIMING	= 0x44,	/* per-drive MWDMA cycle timing bytes */
	ATIIXP_IDE_PIO_CONTROL	= 0x48,	/* channel enable bits (see atiixp_prereset) */
	ATIIXP_IDE_PIO_MODE	= 0x4a,	/* per-drive PIO mode number nibbles */
	ATIIXP_IDE_UDMA_CONTROL	= 0x54,	/* per-drive UDMA enable bits */
	ATIIXP_IDE_UDMA_MODE 	= 0x56	/* per-drive UDMA mode number nibbles */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 		/* Board has onboard PATA<->SATA converters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 		.ident = "MSI E350DM-E33",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 		.matches = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 			DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 			DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) static int atiixp_cable_detect(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	u8 udma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	if (dmi_check_system(attixp_cable_override_dmi_table))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 		return ATA_CBL_PATA40_SHORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	/* Hack from drivers/ide/pci. Really we want to know how to do the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	   raw detection not play follow the bios mode guess */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 		return  ATA_CBL_PATA80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	return ATA_CBL_PATA40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) static DEFINE_SPINLOCK(atiixp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  *	atiixp_prereset	-	perform reset handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  *	@link: ATA link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  *	@deadline: deadline jiffies for the operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  *	Reset sequence checking enable bits to see which ports are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73)  *	active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	static const struct pci_bits atiixp_enable_bits[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		{ 0x48, 1, 0x01, 0x00 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		{ 0x48, 1, 0x08, 0x00 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	struct ata_port *ap = link->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	return ata_sff_prereset(link, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  *	atiixp_set_pio_timing	-	set initial PIO mode data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  *	@ap: ATA interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  *	@adev: ATA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  *	Called by both the pio and dma setup functions to set the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  *	timings for PIO transfers. We must load both the mode number and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  *	timing values into the controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	int dn = 2 * ap->port_no + adev->devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	u32 pio_timing_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	u16 pio_mode_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	pio_mode_data &= ~(0x7 << (4 * dn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	pio_mode_data |= pio << (4 * dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	pio_timing_data &= ~(0xFF << timing_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	pio_timing_data |= (pio_timings[pio] << timing_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)  *	atiixp_set_piomode	-	set initial PIO mode data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)  *	@ap: ATA interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)  *	@adev: ATA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)  *	Called to do the PIO mode setup. We use a shared helper for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)  *	as the DMA setup must also adjust the PIO timing information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	spin_lock_irqsave(&atiixp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	spin_unlock_irqrestore(&atiixp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  *	atiixp_set_dmamode	-	set initial DMA mode data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)  *	@ap: ATA interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)  *	@adev: ATA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)  *	Called to do the DMA mode setup. We use timing tables for most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)  *	modes but must tune an appropriate PIO mode to match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	int dma = adev->dma_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	int dn = 2 * ap->port_no + adev->devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	int wanted_pio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	spin_lock_irqsave(&atiixp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	if (adev->dma_mode >= XFER_UDMA_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 		u16 udma_mode_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 		dma -= XFER_UDMA_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		udma_mode_data &= ~(0x7 << (4 * dn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 		udma_mode_data |= dma << (4 * dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 		pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		u32 mwdma_timing_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 		dma -= XFER_MW_DMA_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 		pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 				      &mwdma_timing_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		mwdma_timing_data &= ~(0xFF << timing_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 				       mwdma_timing_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	 *	We must now look at the PIO mode situation. We may need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	 *	adjust the PIO mode to keep the timings acceptable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	if (adev->dma_mode >= XFER_MW_DMA_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		wanted_pio = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	else if (adev->dma_mode == XFER_MW_DMA_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		wanted_pio = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	else if (adev->dma_mode == XFER_MW_DMA_0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		wanted_pio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	else BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	if (adev->pio_mode != wanted_pio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 		atiixp_set_pio_timing(ap, adev, wanted_pio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	spin_unlock_irqrestore(&atiixp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  *	atiixp_bmdma_start	-	DMA start callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)  *	@qc: Command in progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)  *	When DMA begins we need to ensure that the UDMA control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)  *	register for the channel is correctly set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)  *	Note: The host lock held by the libata layer protects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)  *	us from two channels both trying to set DMA bits at once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	struct ata_device *adev = qc->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	int dn = (2 * ap->port_no) + adev->devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	u16 tmp16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	if (ata_using_udma(adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 		tmp16 |= (1 << dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 		tmp16 &= ~(1 << dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	ata_bmdma_start(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)  *	atiixp_dma_stop	-	DMA stop callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)  *	@qc: Command in progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)  *	DMA has completed. Clear the UDMA flag as the next operations will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)  *	be PIO ones not UDMA data transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)  *	Note: The host lock held by the libata layer protects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)  *	us from two channels both trying to set DMA bits at once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	int dn = (2 * ap->port_no) + qc->dev->devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	u16 tmp16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	tmp16 &= ~(1 << dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	ata_bmdma_stop(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 
/*
 * SCSI host template: stock BMDMA defaults, but with the reduced
 * "dumb" scatter/gather table size (paired with ata_bmdma_dumb_qc_prep
 * in the port ops below).
 */
static struct scsi_host_template atiixp_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
/* Port operations: generic BMDMA with chip-specific timing/cable hooks */
static struct ata_port_operations atiixp_port_ops = {
	.inherits	= &ata_bmdma_port_ops,

	.qc_prep 	= ata_bmdma_dumb_qc_prep,	/* matches LIBATA_DUMB_MAX_PRD */
	.bmdma_start 	= atiixp_bmdma_start,
	.bmdma_stop	= atiixp_bmdma_stop,

	.prereset	= atiixp_prereset,
	.cable_detect	= atiixp_cable_detect,
	.set_piomode	= atiixp_set_piomode,
	.set_dmamode	= atiixp_set_dmamode,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
/**
 *	atiixp_init_one	-	probe callback for an ATI IXP IDE function
 *	@pdev: PCI device being probed
 *	@id: matching entry from the atiixp[] id table
 *
 *	Register both channels (PIO0-4, MWDMA1-2, up to UDMA5) with the
 *	generic BMDMA init helper, scanning the ports in parallel.  The
 *	SB600 only has its primary port wired, so its secondary channel
 *	is replaced by the dummy port info.
 */
static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA12_ONLY,
		.udma_mask = ATA_UDMA5,
		.port_ops = &atiixp_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, &info };

	/* SB600 doesn't have secondary port wired */
	if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
		ppi[1] = &ata_dummy_port_info;

	return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
				      ATA_HOST_PARALLEL_SCAN);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 
/* PCI IDs handled by this driver (ATI IXP family plus AMD Hudson-2) */
static const struct pci_device_id atiixp[] = {
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },

	{ },	/* terminator */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 
/* PCI driver glue; suspend/resume use the generic libata PCI helpers */
static struct pci_driver atiixp_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= atiixp,
	.probe 		= atiixp_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.resume		= ata_pci_device_resume,
	.suspend	= ata_pci_device_suspend,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 
module_pci_driver(atiixp_pci_driver);

MODULE_AUTHOR("Alan Cox");
/* NOTE(review): description omits the IXP600/700 and Hudson-2 IDs the
   id table also matches — consider updating the string upstream. */
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);