Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * pata_pdc202xx_old.c 	- Promise PDC202xx PATA for new ATA layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *			  (C) 2005 Red Hat Inc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *			  Alan Cox <alan@lxorguk.ukuu.org.uk>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *			  (C) 2007,2009,2010 Bartlomiej Zolnierkiewicz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * First cut with LBA48/ATAPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * TODO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  *	Channel interlock/reset on both required ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/libata.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #define DRV_NAME "pata_pdc202xx_old"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #define DRV_VERSION "0.4.3"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) static int pdc2026x_cable_detect(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	u16 cis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	pci_read_config_word(pdev, 0x50, &cis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	if (cis & (1 << (10 + ap->port_no)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 		return ATA_CBL_PATA40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	return ATA_CBL_PATA80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) static void pdc202xx_exec_command(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 				  const struct ata_taskfile *tf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	iowrite8(tf->command, ap->ioaddr.command_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	ndelay(400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) static bool pdc202xx_irq_check(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	unsigned long master	= pci_resource_start(pdev, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	u8 sc1d			= inb(master + 0x1d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	if (ap->port_no) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 		 * bit 7: error, bit 6: interrupting,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 		 * bit 5: FIFO full, bit 4: FIFO empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 		return sc1d & 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	} else	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 		 * bit 3: error, bit 2: interrupting,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 		 * bit 1: FIFO full, bit 0: FIFO empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 		return sc1d & 0x04;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  *	pdc202xx_configure_piomode	-	set chip PIO timing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  *	@ap: ATA interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  *	@adev: ATA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  *	@pio: PIO mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74)  *	Called to do the PIO mode setup. Our timing registers are shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75)  *	so a configure_dmamode call will undo any work we do here and vice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76)  *	versa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	static u16 pio_timing[5] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 		0x0913, 0x050C , 0x0308, 0x0206, 0x0104
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	u8 r_ap, r_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	pci_read_config_byte(pdev, port, &r_ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	pci_read_config_byte(pdev, port + 1, &r_bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	r_ap &= ~0x3F;	/* Preserve ERRDY_EN, SYNC_IN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	r_bp &= ~0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	r_ap |= (pio_timing[pio] >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	r_bp |= (pio_timing[pio] & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	if (ata_pio_need_iordy(adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 		r_ap |= 0x20;	/* IORDY enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	if (adev->class == ATA_DEV_ATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 		r_ap |= 0x10;	/* FIFO enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	pci_write_config_byte(pdev, port, r_ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	pci_write_config_byte(pdev, port + 1, r_bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)  *	pdc202xx_set_piomode	-	set initial PIO mode data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)  *	@ap: ATA interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)  *	@adev: ATA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  *	Called to do the PIO mode setup. Our timing registers are shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)  *	but we want to set the PIO timing by default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) /**
 *	pdc202xx_set_dmamode	-	set DMA mode in chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)  *	@ap: ATA interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)  *	@adev: ATA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)  *	Load DMA cycle times into the chip ready for a DMA transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)  *	to occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	static u8 udma_timing[6][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 		{ 0x60, 0x03 },	/* 33 Mhz Clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		{ 0x40, 0x02 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		{ 0x20, 0x01 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		{ 0x40, 0x02 },	/* 66 Mhz Clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		{ 0x20, 0x01 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 		{ 0x20, 0x01 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	static u8 mdma_timing[3][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 		{ 0xe0, 0x0f },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 		{ 0x60, 0x04 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 		{ 0x60, 0x03 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	u8 r_bp, r_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	pci_read_config_byte(pdev, port + 1, &r_bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	pci_read_config_byte(pdev, port + 2, &r_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	r_bp &= ~0xE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	r_cp &= ~0x0F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	if (adev->dma_mode >= XFER_UDMA_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 		int speed = adev->dma_mode - XFER_UDMA_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		r_bp |= udma_timing[speed][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		r_cp |= udma_timing[speed][1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 		int speed = adev->dma_mode - XFER_MW_DMA_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		r_bp |= mdma_timing[speed][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 		r_cp |= mdma_timing[speed][1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	pci_write_config_byte(pdev, port + 1, r_bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	pci_write_config_byte(pdev, port + 2, r_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  *	pdc2026x_bmdma_start		-	DMA engine begin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  *	@qc: ATA command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  *	In UDMA3 or higher we have to clock switch for the duration of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  *	DMA transfer sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  *	Note: The host lock held by the libata layer protects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)  *	us from two channels both trying to set DMA bits at once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct ata_taskfile *tf = &qc->tf;
	/* Per-channel 66MHz clock select bit in the shared clock register */
	int sel66 = ap->port_no ? 0x08: 0x02;

	/* Both channels' extra registers hang off port 0's BMDMA base */
	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
	void __iomem *clock = master + 0x11;
	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);

	u32 len;

	/* Check we keep host level locking here */
	/* UDMA3+ needs the 66MHz clock; anything slower runs at 33MHz */
	if (adev->dma_mode > XFER_UDMA_2)
		iowrite8(ioread8(clock) | sel66, clock);
	else
		iowrite8(ioread8(clock) & ~sel66, clock);

	/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
	   and move to qc_issue ? */
	pdc202xx_set_dmamode(ap, qc->dev);

	/* Cases the state machine will not complete correctly without help */
	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATAPI_PROT_DMA) {
		/* Transfer length in 16-bit words, with a direction code
		   in the top byte (0x06 write, 0x05 read) */
		len = qc->nbytes / 2;

		if (tf->flags & ATA_TFLAG_WRITE)
			len |= 0x06000000;
		else
			len |= 0x05000000;

		iowrite32(len, atapi_reg);
	}

	/* Activate DMA */
	ata_bmdma_start(qc);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /**
 *	pdc2026x_bmdma_stop		-	DMA engine stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)  *	@qc: ATA command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)  *	After a DMA completes we need to put the clock back to 33MHz for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)  *	PIO timings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)  *	Note: The host lock held by the libata layer protects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)  *	us from two channels both trying to set DMA bits at once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct ata_taskfile *tf = &qc->tf;

	/* Per-channel 66MHz clock select bit, mirroring bmdma_start */
	int sel66 = ap->port_no ? 0x08: 0x02;
	/* The clock bits are in the same register for both channels */
	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
	void __iomem *clock = master + 0x11;
	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);

	/* Cases the state machine will not complete correctly */
	/* Undo the length/direction programming done in bmdma_start */
	if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
		iowrite32(0, atapi_reg);
		iowrite8(ioread8(clock) & ~sel66, clock);
	}
	/* Flip back to 33Mhz for PIO */
	if (adev->dma_mode > XFER_UDMA_2)
		iowrite8(ioread8(clock) & ~sel66, clock);
	ata_bmdma_stop(qc);
	/* Clock change may have clobbered the shared timing registers,
	   so reload the PIO timings */
	pdc202xx_set_piomode(ap, adev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)  *	pdc2026x_dev_config	-	device setup hook
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)  *	@adev: newly found device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)  *	Perform chip specific early setup. We need to lock the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)  *	sizes to 8bit to avoid making the state engine on the 2026x cards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)  *	barf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 
static void pdc2026x_dev_config(struct ata_device *adev)
{
	/* Cap transfers at 256 sectors: per the comment above, larger
	   chunks upset the state engine on the 2026x cards */
	adev->max_sectors = 256;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static int pdc2026x_port_start(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	if (bmdma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		/* Enable burst mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 		u8 burst = ioread8(bmdma + 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 		iowrite8(burst | 0x01, bmdma + 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	return ata_bmdma_port_start(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)  *	pdc2026x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)  *	@qc: Metadata associated with taskfile to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)  *	Just say no - not supported on older Promise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)  *	LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)  *	None (inherited from caller).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)  *	RETURNS: 0 when ATAPI DMA can be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)  *		 1 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* Unconditionally refuse ATAPI DMA (non-zero = not supported) */
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 
/* SCSI host template: stock BMDMA settings, named after the driver */
static struct scsi_host_template pdc202xx_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 
/* Operations for the 20246 (UDMA2 parts): generic BMDMA plus the
   chip-specific timing setup and interrupt/command helpers */
static struct ata_port_operations pdc2024x_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.cable_detect		= ata_cable_40wire,
	.set_piomode		= pdc202xx_set_piomode,
	.set_dmamode		= pdc202xx_set_dmamode,

	.sff_exec_command	= pdc202xx_exec_command,
	.sff_irq_check		= pdc202xx_irq_check,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
/* Operations for the 20262-20267 parts: builds on the 2024x ops,
   adding clock-switched BMDMA, cable detection and transfer caps */
static struct ata_port_operations pdc2026x_port_ops = {
	.inherits		= &pdc2024x_port_ops,

	.check_atapi_dma	= pdc2026x_check_atapi_dma,
	.bmdma_start		= pdc2026x_bmdma_start,
	.bmdma_stop		= pdc2026x_bmdma_stop,

	.cable_detect		= pdc2026x_cable_detect,
	.dev_config		= pdc2026x_dev_config,

	.port_start		= pdc2026x_port_start,

	.sff_exec_command	= pdc202xx_exec_command,
	.sff_irq_check		= pdc202xx_irq_check,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	static const struct ata_port_info info[3] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 			.flags = ATA_FLAG_SLAVE_POSS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 			.pio_mask = ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 			.mwdma_mask = ATA_MWDMA2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 			.udma_mask = ATA_UDMA2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 			.port_ops = &pdc2024x_port_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 			.flags = ATA_FLAG_SLAVE_POSS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 			.pio_mask = ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 			.mwdma_mask = ATA_MWDMA2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 			.udma_mask = ATA_UDMA4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 			.port_ops = &pdc2026x_port_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 			.flags = ATA_FLAG_SLAVE_POSS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 			.pio_mask = ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 			.mwdma_mask = ATA_MWDMA2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 			.udma_mask = ATA_UDMA5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 			.port_ops = &pdc2026x_port_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 		struct pci_dev *bridge = dev->bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 		/* Don't grab anything behind a Promise I2O RAID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 		if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 			if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 				return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 			if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 				return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 
/* Supported devices; the driver_data value indexes the info[] array
   in pdc202xx_init_one() */
static const struct pci_device_id pdc202xx[] = {
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },

	{ },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 
/* PCI driver glue; suspend/resume only when sleep support is built in */
static struct pci_driver pdc202xx_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= pdc202xx,
	.probe 		= pdc202xx_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 
/* Standard module registration and metadata */
module_pci_driver(pdc202xx_pci_driver);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc202xx);
MODULE_VERSION(DRV_VERSION);