Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise  (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"

/*
 * module options
 */

#ifdef CONFIG_PCI
static int msi;
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");
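
/*
 * Usage note (illustrative, not from this file): both thresholds can
 * be set at module load time, e.g. to raise an interrupt after 4
 * completed I/Os or 100us, whichever comes first:
 *
 *   modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100
 *
 * Leaving both at their default of 0 keeps coalescing disabled.
 */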

enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */

	MV_PCI_REG_BASE		= 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	COAL_REG_BASE		= 0x18000,
	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
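
	/*
	 * Illustrative sketch (assumed helper, not the driver's actual
	 * code): a usecs threshold is converted to internal clocks via
	 * COAL_CLOCKS_PER_USEC and clamped to the 24-bit register field:
	 *
	 *   u32 clks = usecs * COAL_CLOCKS_PER_USEC;
	 *   if (clks > MAX_COAL_TIME_THRESHOLD)
	 *           clks = MAX_COAL_TIME_THRESHOLD;
	 *   writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
	 *   writel(min(count, (u32)MAX_COAL_IO_COUNT),
	 *          mmio + IRQ_COAL_IO_THRESHOLD);
	 */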

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE	= 0x20000,
	FLASH_CTL		= 0x1046c,
	GPIO_PORT_CTL		= 0x104f0,
	RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
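
	/* Worked example of the sizing above: 32 CRQBs * 32B = 1KB
	 * (matching the 1KB alignment), 32 CRPBs * 8B = 256B, and
	 * 256 ePRDs * 16B = 4KB of SG table per command slot.
	 */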

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
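
	/* Example: chip-wide port 6 gives hc = 6 >> 2 = 1 (second HC)
	 * and hardport = 6 & 3 = 2 (third port within that HC).
	 */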

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	MV_PCI_COMMAND		= 0xc00,
	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE		= 0x1d58,
	PCI_IRQ_MASK		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE		= 0x1900,
	PCIE_IRQ_MASK		= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
	PCI_ERR			= (1 << 18),
	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
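
	/*
	 * Hedged sketch of the per-port bit position implied above:
	 * the ERR_IRQ/DONE_IRQ pair for chip-wide port "p" sits at bit
	 * 2 * hardport, offset by HC_SHIFT for HC1's ports:
	 *
	 *   unsigned int shift = 2 * (p & MV_PORT_MASK);
	 *   if (p >= MV_PORTS_PER_HC)
	 *           shift += HC_SHIFT;
	 *   // e.g. port 6: DONE_IRQ << shift == 1 << 14,
	 *   // which is indeed set in DONE_IRQ_4_7 (0xaa << 9)
	 */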

	/* SATAHC registers */
	HC_CFG			= 0x00,

	HC_IRQ_CAUSE		= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,

	SOC_LED_CTRL		= 0x2c,
	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
						/*  with dev activity LED */

	/* Shadow block registers */
	SHD_BLK			= 0x100,
	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */

	/* SATA registers */
	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE		= 0x350,
	FIS_IRQ_CAUSE		= 0x364,
	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */

	LTMODE			= 0x30c,	/* requires read-after-write */
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE2		= 0x330,
	PHY_MODE3		= 0x310,

	PHY_MODE4		= 0x314,	/* requires read-after-write */
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	SATA_IFCTL		= 0x344,
	SATA_TESTCTL		= 0x348,
	SATA_IFSTAT		= 0x34c,
	VENDOR_UNIQUE_FIS	= 0x35c,

	FISCFG			= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	PHY_MODE9_GEN2		= 0x398,
	PHY_MODE9_GEN1		= 0x39c,
	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_IFCFG		= 0x050,
	LP_PHY_CTL		= 0x058,
	LP_PHY_CTL_PIN_PU_PLL   = (1 << 0),
	LP_PHY_CTL_PIN_PU_RX    = (1 << 1),
	LP_PHY_CTL_PIN_PU_TX    = (1 << 2),
	LP_PHY_CTL_GEN_TX_3G    = (1 << 5),
	LP_PHY_CTL_GEN_RX_3G    = (1 << 9),

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE	= 0x8,
	EDMA_ERR_IRQ_MASK	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI	= 0x10,
	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI	= 0x1c,
	EDMA_RSP_Q_IN_PTR	= 0x20,
	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD		= 0x224,	/* bmdma command register */
	BMDMA_STATUS		= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */
	MV_HP_FIX_LP_PHY_CTL	= (1 << 13),	/* fix speed in LP_PHY_CTL ? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
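
/*
 * Example: mbus window 1 is controlled via WINDOW_CTRL(1) =
 * 0x20030 + (1 << 4) = 0x20040, with its base at WINDOW_BASE(1) = 0x20044.
 */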

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
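
/*
 * Illustrative sketch of why the masks above work: the request queue is
 * 1KB-aligned, so the low 10 bits of its DMA address are zero and the
 * ring index can be packed into the same register (EDMA_REQ_Q_IN_PTR
 * and EDMA_REQ_Q_PTR_SHIFT are defined with the port registers above):
 *
 *   writel((crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) |
 *          (req_idx << EDMA_REQ_Q_PTR_SHIFT),
 *          port_mmio + EDMA_REQ_Q_IN_PTR);
 */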

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};
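
/*
 * A minimal sketch of the write-back pattern this cache enables
 * (the driver's real helper may differ): only touch the (slow)
 * register when the value actually changes.
 *
 *   static void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
 *   {
 *           if (new != *old) {
 *                   *old = new;
 *                   writel(new, addr);
 *           }
 *   }
 */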

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	unsigned int		board_idx;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_offset;
	u32			irq_mask_offset;
	u32			unmask_all_irqs;

	/*
	 * Needed on some devices that require their clocks to be enabled.
	 * These are optional: if the platform device does not have any
	 * clocks, they won't be used.  Also, if the underlying hardware
	 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
	 * all the clock operations become no-ops (see clk.h).
	 */
	struct clk		*clk;
	struct clk		**port_clks;
	/*
	 * Some devices have a SATA PHY which can be enabled/disabled
	 * in order to save power. These are optional: if the platform
	 * device does not have a PHY, they won't be used.
	 */
	struct phy		**port_phys;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
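
/*
 * Hedged sketch of the "optional clock" handling described above:
 * failure to obtain a clock is tolerated rather than treated as fatal.
 *
 *   hpriv->clk = clk_get(&pdev->dev, NULL);
 *   if (IS_ERR(hpriv->clk))
 *           dev_notice(&pdev->dev, "cannot get optional clkdev\n");
 *   else
 *           clk_prepare_enable(hpriv->clk);
 */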

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static int  mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8   mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
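
/*
 * Hedged sketch of that worst-case splitting: one scatterlist segment
 * crossing a 64K boundary must be emitted as two ePRDs, so only half
 * of MV_MAX_SG_CT can be advertised to the SCSI layer:
 *
 *   while (sg_len) {
 *           u32 offset = addr & 0xffff;
 *           u32 len = sg_len;
 *
 *           if (offset + len > 0x10000)
 *                   len = 0x10000 - offset;
 *           // emit one ePRD covering (addr, len), then advance
 *           addr += len;
 *           sg_len -= len;
 *   }
 */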
#ifdef CONFIG_PCI
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
#endif
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.dev_config             = mv6_dev_config,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.softreset		= mv_softreset,
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) static const struct ata_port_info mv_port_info[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	{  /* chip_504x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		.flags		= MV_GEN_I_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		.pio_mask	= ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		.udma_mask	= ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		.port_ops	= &mv5_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	{  /* chip_508x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		.pio_mask	= ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		.udma_mask	= ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		.port_ops	= &mv5_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	{  /* chip_5080 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		.pio_mask	= ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		.udma_mask	= ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		.port_ops	= &mv5_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	{  /* chip_604x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		.flags		= MV_GEN_II_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		.pio_mask	= ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		.udma_mask	= ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		.port_ops	= &mv6_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	{  /* chip_608x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		.pio_mask	= ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		.udma_mask	= ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		.port_ops	= &mv6_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	{  /* chip_6042 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		.flags		= MV_GEN_IIE_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		.pio_mask	= ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		.udma_mask	= ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		.port_ops	= &mv_iie_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	{  /* chip_7042 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		.flags		= MV_GEN_IIE_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		.pio_mask	= ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		.udma_mask	= ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		.port_ops	= &mv_iie_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	{  /* chip_soc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		.flags		= MV_GEN_IIE_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		.pio_mask	= ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		.udma_mask	= ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		.port_ops	= &mv_iie_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) static const struct pci_device_id mv_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	/* RocketRAID 1720/174x have different identifiers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	/* Adaptec 1430SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	/* Marvell 7042 support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	/* Highpoint RocketRAID PCIe series */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	{ }			/* terminate list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static const struct mv_hw_ops mv5xxx_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	.phy_errata		= mv5_phy_errata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	.enable_leds		= mv5_enable_leds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	.read_preamp		= mv5_read_preamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	.reset_hc		= mv5_reset_hc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	.reset_flash		= mv5_reset_flash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	.reset_bus		= mv5_reset_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) static const struct mv_hw_ops mv6xxx_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	.phy_errata		= mv6_phy_errata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	.enable_leds		= mv6_enable_leds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	.read_preamp		= mv6_read_preamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	.reset_hc		= mv6_reset_hc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	.reset_flash		= mv6_reset_flash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	.reset_bus		= mv_reset_pci_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) static const struct mv_hw_ops mv_soc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	.phy_errata		= mv6_phy_errata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	.enable_leds		= mv_soc_enable_leds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	.read_preamp		= mv_soc_read_preamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	.reset_hc		= mv_soc_reset_hc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	.reset_flash		= mv_soc_reset_flash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	.reset_bus		= mv_soc_reset_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) static const struct mv_hw_ops mv_soc_65n_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	.phy_errata		= mv_soc_65n_phy_errata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	.enable_leds		= mv_soc_enable_leds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	.reset_hc		= mv_soc_reset_hc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	.reset_flash		= mv_soc_reset_flash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	.reset_bus		= mv_soc_reset_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  * Functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) static inline void writelfl(unsigned long data, void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	writel(data, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	(void) readl(addr);	/* flush to avoid PCI posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) }
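
/*
 * Illustrative sketch (an assumption about standard PCI write-posting
 * behaviour, not something documented in this driver): a plain writel()
 * may sit in a bridge's posting buffer and reach the chip noticeably
 * later, whereas the dummy readl() above cannot complete until the
 * preceding write has.  So, for control bits that must take effect
 * before the code continues:
 *
 *	writel(EDMA_DS, port_mmio + EDMA_CMD);	 // may still be in flight
 *	writelfl(EDMA_DS, port_mmio + EDMA_CMD); // has reached the chip
 *						 // when this returns
 */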
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) static inline unsigned int mv_hc_from_port(unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	return port >> MV_PORT_HC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) static inline unsigned int mv_hardport_from_port(unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	return port & MV_PORT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  * Consolidate some rather tricky bit shift calculations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  * This is hot-path stuff, so not a function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875)  * Simple code, with two return values, so macro rather than inline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877)  * port is the sole input, in range 0..7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878)  * shift is one output, for use with main_irq_cause / main_irq_mask registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879)  * hardport is the other output, in range 0..3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  * Note that port and hardport may be the same variable in some cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	hardport = mv_hardport_from_port(port);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	shift   += hardport * 2;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) }
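
/*
 * Worked example, assuming the geometry used throughout this driver
 * (4 ports per host controller, so MV_PORT_HC_SHIFT == 2 and
 * MV_PORT_MASK == 3) and HC_SHIFT == 9:
 *
 *	port 0 -> hc 0, hardport 0, shift = 0*9 + 0*2 =  0
 *	port 3 -> hc 0, hardport 3, shift = 0*9 + 3*2 =  6
 *	port 5 -> hc 1, hardport 1, shift = 1*9 + 1*2 = 11
 *
 * i.e. each hardport owns two adjacent bits (DONE_IRQ, ERR_IRQ) inside
 * its host controller's field of the main_irq_cause/mask registers.
 */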
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 						 unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	return mv_hc_base(base, mv_hc_from_port(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	return  mv_hc_base_from_port(base, port) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		MV_SATAHC_ARBTR_REG_SZ +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	return hc_mmio + ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) static inline void __iomem *mv_host_base(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	return hpriv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) static inline void __iomem *mv_ap_base(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	return mv_port_base(mv_host_base(ap->host), ap->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) static inline int mv_get_hc_count(unsigned long port_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  *      mv_save_cached_regs - (re-)initialize cached port registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  *      @ap: the port whose registers we are caching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  *	Initialize the local cache of port registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  *	so that reading them over and over again can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  *	be avoided on the hotter paths of this driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  *	This saves a few microseconds each time we switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  *	to/from EDMA mode to perform (e.g.) a drive cache flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) static void mv_save_cached_regs(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	pp->cached.fiscfg = readl(port_mmio + FISCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	pp->cached.ltmode = readl(port_mmio + LTMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  *      mv_write_cached_reg - write to a cached port register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  *      @addr: hardware address of the register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  *      @old: pointer to cached value of the register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  *      @new: new value for the register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  *	Write a new value to a cached register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  *	but only if the value is different from before.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	if (new != *old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		unsigned long laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		*old = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		 * Workaround for 88SX60x1-B2 FEr SATA#13:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		 * Read-after-write is needed to prevent generating 64-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		 * write cycles on the PCI bus for SATA interface registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		 * at offsets ending in 0x4 or 0xc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		 * Looks like a lot of fuss, but it avoids an unnecessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		 * +1 usec read-after-write delay for unaffected registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		laddr = (unsigned long)addr & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		if (laddr >= 0x300 && laddr <= 0x33c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			laddr &= 0x000f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			if (laddr == 0x4 || laddr == 0xc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				writelfl(new, addr); /* read after write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		writel(new, addr); /* unaffected by the errata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) }
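
/*
 * Worked example of the errata filter above: the SATA interface
 * registers sit at offsets 0x300..0x33c within the port window (see
 * mv_dump_all_regs() below).  A write whose address ends in 0x30c has
 * (laddr & 0x000f) == 0xc and takes the flushed writelfl() path, while
 * one ending in 0x308 is unaffected by FEr SATA#13 and uses the
 * cheaper posted writel().
 */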
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) static void mv_set_edma_ptrs(void __iomem *port_mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			     struct mv_host_priv *hpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			     struct mv_port_priv *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 * initialize request queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	WARN_ON(pp->crqb_dma & 0x3ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		 port_mmio + EDMA_REQ_Q_IN_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 * initialize response queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	WARN_ON(pp->crpb_dma & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		 port_mmio + EDMA_RSP_Q_OUT_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
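
/*
 * A note on the "(x >> 16) >> 16" idiom above: dma_addr_t is only 32
 * bits wide on some configurations, where a direct "x >> 32" would be
 * undefined behaviour and a compiler warning.  Shifting twice by 16
 * yields the upper half of a 64-bit address, and a harmless zero on
 * 32-bit builds.
 */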
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	 * When writing to the main_irq_mask in hardware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	 * we must ensure exclusivity between the interrupt coalescing bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	 * and the corresponding individual port DONE_IRQ bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	 * Note that this register is really an "IRQ enable" register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	 * not an "IRQ mask" register as Marvell's naming might suggest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		mask &= ~DONE_IRQ_0_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		mask &= ~DONE_IRQ_4_7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	writelfl(mask, hpriv->main_irq_mask_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
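
/*
 * Worked example: a caller enabling ALL_PORTS_COAL_DONE together with
 * the per-port DONE_IRQ bits would otherwise get two interrupts per
 * completion; the two tests above strip DONE_IRQ_0_3 and DONE_IRQ_4_7
 * from the mask, so only the coalesced source remains enabled.
 */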
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static void mv_set_main_irq_mask(struct ata_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 				 u32 disable_bits, u32 enable_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	u32 old_mask, new_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	old_mask = hpriv->main_irq_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	new_mask = (old_mask & ~disable_bits) | enable_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (new_mask != old_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		hpriv->main_irq_mask = new_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		mv_write_main_irq_mask(new_mask, hpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static void mv_enable_port_irqs(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				     unsigned int port_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	unsigned int shift, hardport, port = ap->port_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	u32 disable_bits, enable_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	enable_bits  = port_bits << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 					  void __iomem *port_mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 					  unsigned int port_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	int hardport = mv_hardport_from_port(ap->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	void __iomem *hc_mmio = mv_hc_base_from_port(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 				mv_host_base(ap->host), ap->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	u32 hc_irq_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	/* clear EDMA event indicators, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	/* clear pending irq events for this hardport only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	/* clear FIS IRQ Cause */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	if (IS_GEN_IIE(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	mv_enable_port_irqs(ap, port_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
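
/*
 * Note (an assumption inferred from the idiom, not from chip docs):
 * HC_IRQ_CAUSE appears to clear the bits written as 0 and leave bits
 * written as 1 untouched, which is why the complement of this
 * hardport's DEV/DMA bits is written above -- only this port's pending
 * events are acknowledged.  mv_set_irq_coalescing() uses the same
 * ~HC_COAL_IRQ idiom on this register.
 */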
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static void mv_set_irq_coalescing(struct ata_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 				  unsigned int count, unsigned int usecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	void __iomem *mmio = hpriv->base, *hc_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	u32 coal_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 							ALL_PORTS_COAL_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	/* Disable IRQ coalescing if either threshold is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	if (!usecs || !count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		clks = count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		/* Respect maximum limits of the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		clks = usecs * COAL_CLOCKS_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		if (clks > MAX_COAL_TIME_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			clks = MAX_COAL_TIME_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		if (count > MAX_COAL_IO_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			count = MAX_COAL_IO_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	mv_set_main_irq_mask(host, coal_disable, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	if (is_dual_hc && !IS_GEN_I(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		 * GEN_II/GEN_IIE with dual host controllers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		 * one set of global thresholds for the entire chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		/* clear leftover coal IRQ bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			coal_enable = ALL_PORTS_COAL_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		clks = count = 0; /* force clearing of regular regs below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	 * All chips: independent thresholds for each HC on the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	hc_mmio = mv_hc_base_from_port(mmio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		coal_enable |= PORTS_0_3_COAL_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	if (is_dual_hc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			coal_enable |= PORTS_4_7_COAL_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	mv_set_main_irq_mask(host, 0, coal_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
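
/*
 * Worked example, assuming COAL_CLOCKS_PER_USEC == 150 (a 150 MHz
 * internal clock): count == 4, usecs == 100 programs a time threshold
 * of 100 * 150 = 15000 clocks, so one interrupt is raised after four
 * completions or ~100 usecs of coalescing, whichever comes first.
 * Either threshold is silently clamped to MAX_COAL_IO_COUNT /
 * MAX_COAL_TIME_THRESHOLD, and passing 0 for either disables
 * coalescing entirely.
 */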
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  *      mv_start_edma - Enable eDMA engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  *      @ap: ATA port whose eDMA engine is being enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  *      @port_mmio: port base address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  *      @pp: port private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  *      @protocol: taskfile protocol of the command being started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)  *      Enable eDMA, reconfiguring it first if the NCQ mode must change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			 struct mv_port_priv *pp, u8 protocol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	int want_ncq = (protocol == ATA_PROT_NCQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		if (want_ncq != using_ncq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			mv_stop_edma(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		mv_edma_cfg(ap, want_ncq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		mv_set_edma_ptrs(port_mmio, hpriv, pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	 * Wait for the EDMA engine to finish transactions in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	 * No idea what a good "timeout" value might be, but measurements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	 * indicate that it often requires hundreds of microseconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	 * with two drives in-use.  So we use the 15msec value above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	 * as a rough guess at what even more drives might require.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	for (i = 0; i < timeout; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		if ((edma_stat & empty_idle) == empty_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		udelay(per_loop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
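
/*
 * Arithmetic check of the bound above: timeout = 15 * 1000 / 5 = 3000
 * iterations of udelay(5), i.e. the loop gives up after at most the
 * 15 msec mentioned in the comment.
 */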
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)  *      mv_stop_edma_engine - Disable eDMA engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)  *      @port_mmio: io base address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static int mv_stop_edma_engine(void __iomem *port_mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	/* Disable eDMA.  The disable bit auto clears. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	/* Wait for the chip to confirm eDMA is off. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	for (i = 10000; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		u32 reg = readl(port_mmio + EDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		if (!(reg & EDMA_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
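
/*
 * Arithmetic check: 10000 iterations of udelay(10) allow up to 100 msec
 * for EDMA_EN to clear before the -EIO above is returned.
 */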
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) static int mv_stop_edma(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	mv_wait_for_edma_empty_idle(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (mv_stop_edma_engine(port_mmio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		ata_port_err(ap, "Unable to stop eDMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	mv_edma_cfg(ap, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) #ifdef ATA_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) static void mv_dump_mem(void __iomem *start, unsigned bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	int b, w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	for (b = 0; b < bytes; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		DPRINTK("%p: ", start + b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		for (w = 0; b < bytes && w < 4; w++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			printk("%08x ", readl(start + b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			b += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) #if defined(ATA_DEBUG) || defined(CONFIG_PCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) #ifdef ATA_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	int b, w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	u32 dw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	for (b = 0; b < bytes; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		DPRINTK("%02x: ", b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		for (w = 0; b < bytes && w < 4; w++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			(void) pci_read_config_dword(pdev, b, &dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			printk("%08x ", dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			b += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static void mv_dump_all_regs(void __iomem *mmio_base, int port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 			     struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) #ifdef ATA_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	void __iomem *hc_base = mv_hc_base(mmio_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 					   port >> MV_PORT_HC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	void __iomem *port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	int start_port, num_ports, p, start_hc, num_hcs, hc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	if (port < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		start_hc = start_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		num_ports = 8;		/* should be benign for 4-port devs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		num_hcs = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		start_hc = port >> MV_PORT_HC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		start_port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		num_ports = num_hcs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		num_ports > 1 ? num_ports - 1 : start_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if (pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		DPRINTK("PCI config space regs:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		mv_dump_pci_cfg(pdev, 0x68);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	DPRINTK("PCI regs:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	mv_dump_mem(mmio_base+0xc00, 0x3c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	mv_dump_mem(mmio_base+0xd00, 0x34);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	mv_dump_mem(mmio_base+0xf00, 0x4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		hc_base = mv_hc_base(mmio_base, hc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		DPRINTK("HC regs (HC %i):\n", hc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		mv_dump_mem(hc_base, 0x1c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	for (p = start_port; p < start_port + num_ports; p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		port_base = mv_port_base(mmio_base, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		DPRINTK("EDMA regs (port %i):\n", p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		mv_dump_mem(port_base, 0x54);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		DPRINTK("SATA regs (port %i):\n", p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		mv_dump_mem(port_base+0x300, 0x60);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static unsigned int mv_scr_offset(unsigned int sc_reg_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	unsigned int ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	switch (sc_reg_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	case SCR_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	case SCR_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	case SCR_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	case SCR_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		ofs = SATA_ACTIVE;   /* active is not with the others */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		ofs = 0xffffffffU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	return ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
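
/*
 * Resulting layout, assuming the standard libata SCR indices
 * (SCR_STATUS == 0, SCR_ERROR == 1, SCR_CONTROL == 2):
 *
 *	SCR_STATUS  -> SATA_STATUS + 0x0
 *	SCR_ERROR   -> SATA_STATUS + 0x4
 *	SCR_CONTROL -> SATA_STATUS + 0x8
 *	SCR_ACTIVE  -> SATA_ACTIVE (not adjacent to the others)
 *
 * Any other register yields the 0xffffffffU sentinel, which the two
 * accessors below turn into -EINVAL.
 */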
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	unsigned int ofs = mv_scr_offset(sc_reg_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (ofs != 0xffffffffU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		*val = readl(mv_ap_base(link->ap) + ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	unsigned int ofs = mv_scr_offset(sc_reg_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	if (ofs != 0xffffffffU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		void __iomem *addr = mv_ap_base(link->ap) + ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		struct mv_host_priv *hpriv = link->ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		if (sc_reg_in == SCR_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			 * Workaround for 88SX60x1 FEr SATA#26:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			 * COMRESETs have to take care not to accidentally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			 * put the drive to sleep when writing SCR_CONTROL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			 * Setting bits 12..15 prevents this problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 			 * So if we see an outbound COMRESET, set those bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			 * Ditto for the followup write that clears the reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			 * The proprietary driver does this for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			 * all chip versions, and so do we.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 				val |= 0xf000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 				void __iomem *lp_phy_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 					mv_ap_base(link->ap) + LP_PHY_CTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 				 * Set PHY speed according to SControl speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 				u32 lp_phy_val =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 					LP_PHY_CTL_PIN_PU_PLL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 					LP_PHY_CTL_PIN_PU_RX  |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 					LP_PHY_CTL_PIN_PU_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 				if ((val & 0xf0) != 0x10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 					lp_phy_val |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 						LP_PHY_CTL_GEN_TX_3G |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 						LP_PHY_CTL_GEN_RX_3G;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 				writelfl(lp_phy_val, lp_phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		writelfl(val, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
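
/*
 * Worked example of the SATA#26 workaround: a hardreset typically
 * writes SControl DET=1 (e.g. 0x301) to assert COMRESET, then DET=0
 * (e.g. 0x300) to release it.  The first write matches via
 * (val & 0xf) == 1, the second because the register still reads back
 * DET=1, so both go out with bits 12..15 set (0xf301, then 0xf300).
 */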
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static void mv6_dev_config(struct ata_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	 * Gen-II does not support NCQ over a port multiplier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	 *  (no FIS-based switching).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	if (adev->flags & ATA_DFLAG_NCQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		if (sata_pmp_attached(adev->link->ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			adev->flags &= ~ATA_DFLAG_NCQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 			ata_dev_info(adev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 				"NCQ disabled for command-based switching\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static int mv_qc_defer(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	struct ata_link *link = qc->dev->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	struct ata_port *ap = link->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	 * Don't allow new commands if we're in a delayed EH state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	 * for NCQ and/or FIS-based switching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		return ATA_DEFER_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	/* PIO commands need an exclusive link: no other commands [DMA or PIO]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	 * can run concurrently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	 * Set excl_link when we want to send a PIO command in DMA mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	 * or a non-NCQ command in NCQ mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	 * When we later receive a command from that link and there are no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	 * outstanding commands, mark a flag to clear excl_link and let
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	 * the command go through.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	if (unlikely(ap->excl_link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		if (link == ap->excl_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			if (ap->nr_active_links)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 				return ATA_DEFER_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			return ATA_DEFER_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	 * If the port is completely idle, then allow the new qc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	if (ap->nr_active_links == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	 * The port is operating in host queuing mode (EDMA) with NCQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	 * enabled, allow multiple NCQ commands.  EDMA also allows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	 * queueing multiple DMA commands but libata core currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	 * doesn't allow it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		if (ata_is_ncq(qc->tf.protocol))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 			ap->excl_link = link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 			return ATA_DEFER_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	return ATA_DEFER_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
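
/*
 * Example of the excl_link handshake above: with an NCQ stream running
 * under EDMA, a non-NCQ command (a cache flush, say) arrives.  It is
 * deferred and its link recorded in ap->excl_link; each retry is
 * deferred again until the port goes idle, at which point the command
 * is released with ATA_QCFLAG_CLEAR_EXCL set so the exclusive claim is
 * dropped when it completes.
 */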
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	void __iomem *port_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	ltmode   = *old_ltmode & ~LTMODE_BIT8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	haltcond = *old_haltcond | EDMA_ERR_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	if (want_fbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		ltmode = *old_ltmode | LTMODE_BIT8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		if (want_ncq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			haltcond &= ~EDMA_ERR_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			fiscfg |=  FISCFG_WAIT_DEV_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
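
/*
 * mv_write_cached_reg() (defined earlier in this file) is assumed here
 * to be a write-through cache helper, roughly:
 *
 *	if (new != *old) {
 *		*old = new;
 *		writel(new, addr);	/- plus an errata-aware flush -/
 *	}
 *
 * which is why mv_config_fbs() derives the new fiscfg/ltmode/haltcond
 * values from the cached copies: unchanged registers cost no MMIO.
 */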
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	u32 old, new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	old = readl(hpriv->base + GPIO_PORT_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (want_ncq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		new = old | (1 << 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		new = old & ~(1 << 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	if (new != old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		writel(new, hpriv->base + GPIO_PORT_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)  *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)  *	@ap: Port being initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)  *	@enable_bmdma: enable (non-zero) or disable (zero) basic DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)  *	There are two DMA modes on these chips:  basic DMA, and EDMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  *	Bit-0 of the "EDMA RESERVED" register enables/disables use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  *	of basic DMA on the GEN_IIE versions of the chips.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)  *	This bit survives EDMA resets; it must be set for basic DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)  *	to function, and should be cleared when EDMA is active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	u32 new, *old = &pp->cached.unknown_rsvd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (enable_bmdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		new = *old | 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		new = *old & ~1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
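
/*
 * Note: mv_edma_cfg() below calls mv_bmdma_enable_iie(ap, !want_edma),
 * so on GEN_IIE parts basic DMA and EDMA are never enabled together.
 */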
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  * SOC chips have an issue whereby the HDD LEDs don't always blink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  * of the SOC takes care of it, generating a steady blink rate when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  * any drive on the chip is active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)  * Unfortunately, the blink mode is a global hardware setting for the SOC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  * so we must use it whenever at least one port on the SOC has NCQ enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)  * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)  * LED operation works then, and provides better (more accurate) feedback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  * Note that this code assumes that an SOC never has more than one HC onboard.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static void mv_soc_led_blink_enable(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	struct ata_host *host = ap->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	void __iomem *hc_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	u32 led_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static void mv_soc_led_blink_disable(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	struct ata_host *host = ap->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	void __iomem *hc_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	u32 led_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	unsigned int port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	/* disable led-blink only if no ports are using NCQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	for (port = 0; port < hpriv->n_ports; port++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		struct ata_port *this_ap = host->ports[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		struct mv_port_priv *pp = this_ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	u32 cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	struct mv_port_priv *pp    = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	void __iomem *port_mmio    = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	/* set up non-NCQ EDMA configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	pp->pp_flags &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	if (IS_GEN_I(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		cfg |= (1 << 8);	/* enab config burst size mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	else if (IS_GEN_II(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		mv_60x1_errata_sata25(ap, want_ncq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	} else if (IS_GEN_IIE(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		int want_fbs = sata_pmp_attached(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		 * Possible future enhancement:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		 * The chip can use FBS with non-NCQ, if we allow it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		 * But first we need to have the error handling in place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		 * for this mode (datasheet section 7.3.15.4.2.3).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		 * So disallow non-NCQ FBS for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		want_fbs &= want_ncq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		mv_config_fbs(ap, want_ncq, want_fbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		if (want_fbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		if (want_edma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			cfg |= (1 << 22); /* enab 4-entry host queue cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			if (!IS_SOC(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 				cfg |= (1 << 18); /* enab early completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		mv_bmdma_enable_iie(ap, !want_edma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		if (IS_SOC(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 			if (want_ncq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 				mv_soc_led_blink_enable(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 				mv_soc_led_blink_disable(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	if (want_ncq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		cfg |= EDMA_CFG_NCQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	writelfl(cfg, port_mmio + EDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
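
/*
 * Worked example of the cfg value built above: a non-SOC GEN_IIE port
 * with a port multiplier attached and want_ncq = want_edma = 1 ends up
 * with:
 *
 *	cfg = EDMA_CFG_Q_DEPTH		(always set)
 *	    | EDMA_CFG_EDMA_FBS		(FIS-based switching)
 *	    | (1 << 23)			(don't mask PM field of rx'd FIS)
 *	    | (1 << 22)			(4-entry host queue cache)
 *	    | (1 << 18)			(early completion, non-SOC only)
 *	    | EDMA_CFG_NCQ;		(plus (1 << 17) if MV_HP_CUT_THROUGH)
 *
 * and pp_flags gains MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN.
 */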
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static void mv_port_free_dma_mem(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	if (pp->crqb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		pp->crqb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (pp->crpb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		pp->crpb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	 * For later hardware, we have one unique sg_tbl per NCQ tag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		if (pp->sg_tbl[tag]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			if (tag == 0 || !IS_GEN_I(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 				dma_pool_free(hpriv->sg_tbl_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 					      pp->sg_tbl[tag],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 					      pp->sg_tbl_dma[tag]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			pp->sg_tbl[tag] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)  *      mv_port_start - Port specific init/start routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  *      @ap: ATA channel to manipulate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)  *      Allocate and point to DMA memory, init port private memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)  *      zero indices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static int mv_port_start(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	struct device *dev = ap->host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	struct mv_port_priv *pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	if (!pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	ap->private_data = pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	if (!pp->crqb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	if (!pp->crpb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		goto out_port_free_dma_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		ap->flags |= ATA_FLAG_AN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	 * For later hardware, we need one unique sg_tbl per NCQ tag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		if (tag == 0 || !IS_GEN_I(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			if (!pp->sg_tbl[tag])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 				goto out_port_free_dma_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 			pp->sg_tbl[tag]     = pp->sg_tbl[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	spin_lock_irqsave(ap->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	mv_save_cached_regs(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	mv_edma_cfg(ap, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	spin_unlock_irqrestore(ap->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) out_port_free_dma_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	mv_port_free_dma_mem(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
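
/*
 * Note: pp itself is devm-allocated, so the error path above only has
 * to undo the dma_pool allocations; the port private data is released
 * automatically when the device goes away.
 */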
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  *      mv_port_stop - Port specific cleanup/stop routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  *      @ap: ATA channel to manipulate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)  *      Stop DMA, cleanup port memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)  *      This routine uses the host lock to protect the DMA stop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) static void mv_port_stop(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	spin_lock_irqsave(ap->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	mv_stop_edma(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	mv_enable_port_irqs(ap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	spin_unlock_irqrestore(ap->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	mv_port_free_dma_mem(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)  *      @qc: queued command whose SG list to source from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)  *      Populate the SG list and mark the last entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) static void mv_fill_sg(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	struct mv_port_priv *pp = qc->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	struct mv_sg *mv_sg, *last_sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	unsigned int si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	mv_sg = pp->sg_tbl[qc->hw_tag];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		dma_addr_t addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		u32 sg_len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		while (sg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 			u32 offset = addr & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			u32 len = sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			if (offset + len > 0x10000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 				len = 0x10000 - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 			mv_sg->reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 			sg_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 			addr += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 			last_sg = mv_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 			mv_sg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	if (likely(last_sg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	mb(); /* ensure data structure is visible to the chipset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
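
/*
 * Worked example of the 64KB-boundary split above: one segment at dma
 * address 0x1234f000 with length 0x3000 becomes two ePRDs:
 *
 *	#1: addr 0x1234f000, len 0x1000   (stops at the 64KB boundary)
 *	#2: addr 0x12350000, len 0x2000
 *
 * "len & 0xffff" encodes a full 64KB chunk as zero (the usual PRD
 * convention), and "(addr >> 16) >> 16" extracts the high dword
 * without a shift-by-32, which would be undefined behaviour when
 * dma_addr_t is only 32 bits wide.
 */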
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		(last ? CRQB_CMD_LAST : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	*cmdw = cpu_to_le16(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)  *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)  *	@ap: Port associated with this ATA transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)  *	We need this only for ATAPI bmdma transactions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)  *	as otherwise we experience spurious interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)  *	after libata-sff handles the bmdma interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) static void mv_sff_irq_clear(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)  *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)  *	@qc: queued command to check for chipset/DMA compatibility.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)  *	The bmdma engines cannot handle speculative data sizes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)  *	(bytecount under/over flow).  So only allow DMA for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)  *	data transfer commands with known data sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)  *	LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)  *	Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	struct scsi_cmnd *scmd = qc->scsicmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	if (scmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		switch (scmd->cmnd[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		case READ_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		case READ_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		case READ_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		case WRITE_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		case WRITE_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		case WRITE_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		case GPCMD_READ_CD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		case GPCMD_SEND_DVD_STRUCTURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		case GPCMD_SEND_CUE_SHEET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			return 0; /* DMA is safe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	return -EOPNOTSUPP; /* use PIO instead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
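
/*
 * Example: READ(10) carries an explicit transfer length, so it passes
 * the filter above and may use bmdma; something like REQUEST SENSE is
 * not listed, gets -EOPNOTSUPP here, and libata then issues it via PIO.
 */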
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)  *	mv_bmdma_setup - Set up BMDMA transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)  *	@qc: queued command to prepare DMA for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)  *	LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)  *	Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) static void mv_bmdma_setup(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	mv_fill_sg(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	/* clear all DMA cmd bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	writel(0, port_mmio + BMDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	/* load PRD table addr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		port_mmio + BMDMA_PRD_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	writelfl(pp->sg_tbl_dma[qc->hw_tag],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		port_mmio + BMDMA_PRD_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	/* issue r/w command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	ap->ops->sff_exec_command(ap, &qc->tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)  *	mv_bmdma_start - Start a BMDMA transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)  *	@qc: queued command to start DMA on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)  *	LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)  *	Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) static void mv_bmdma_start(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	/* start host DMA transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	writelfl(cmd, port_mmio + BMDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
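
/*
 * Direction note for mv_bmdma_start(): ATA_DMA_WR is set for device
 * *reads*, because the bit describes the DMA engine's access to host
 * memory (it writes memory when data flows in from the device), not
 * the direction of the ATA command itself.
 */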
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)  *	mv_bmdma_stop_ap - Stop BMDMA transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)  *	@ap: port on which to stop DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)  *	Clears the ATA_DMA_START flag in the bmdma control register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)  *	LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)  *	Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) static void mv_bmdma_stop_ap(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	/* clear start/stop bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	cmd = readl(port_mmio + BMDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	if (cmd & ATA_DMA_START) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		cmd &= ~ATA_DMA_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		writelfl(cmd, port_mmio + BMDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		ata_sff_dma_pause(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) static void mv_bmdma_stop(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	mv_bmdma_stop_ap(qc->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)  *	mv_bmdma_status - Read BMDMA status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)  *	@ap: port for which to retrieve DMA status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)  *	Read and return equivalent of the sff BMDMA status register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)  *	LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)  *	Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) static u8 mv_bmdma_status(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	u32 reg, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	 * In this chip, other status bits are valid only when ATA_DMA_ACTIVE==0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	 * and there is no ATA_DMA_INTR bit, so we synthesize one below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	reg = readl(port_mmio + BMDMA_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	if (reg & ATA_DMA_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		status = ATA_DMA_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	else if (reg & ATA_DMA_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		 * Just because DMA_ACTIVE is 0 (DMA completed),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		 * this does _not_ mean the device is "done".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		 * So we should not yet be signalling ATA_DMA_INTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		mv_bmdma_stop_ap(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 			status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			status = ATA_DMA_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	struct ata_taskfile *tf = &qc->tf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	 * Workaround for 88SX60x1 FEr SATA#24.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	 * Chip may corrupt WRITEs if multi_count >= 4kB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	 * Note that READs are unaffected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	 * It's not clear if this errata really means "4K bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	 * or if it always happens for multi_count > 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	 * regardless of device sector_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	 * So, for safety, any write with multi_count > 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	 * gets converted here into a regular PIO write instead:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		if (qc->dev->multi_count > 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 			switch (tf->command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 			case ATA_CMD_WRITE_MULTI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 				tf->command = ATA_CMD_PIO_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 			case ATA_CMD_WRITE_MULTI_FUA_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 				fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			case ATA_CMD_WRITE_MULTI_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 				tf->command = ATA_CMD_PIO_WRITE_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
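
/*
 * Example: a WRITE MULTIPLE EXT with multi_count == 16 is rewritten
 * above into WRITE SECTOR(S) EXT (ordinary PIO); the FUA variant also
 * loses ATA_TFLAG_FUA, trading FUA semantics for data integrity on
 * the affected 88SX60x1 chips.
 */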
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)  *      mv_qc_prep - Host specific command preparation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)  *      @qc: queued command to prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)  *      This routine simply redirects to the general purpose routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)  *      if command is not DMA.  Else, it handles prep of the CRQB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)  *      (command request block), does some sanity checking, and calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)  *      the SG load routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	__le16 *cw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	struct ata_taskfile *tf = &qc->tf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	u16 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	unsigned in_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	switch (tf->protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	case ATA_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		if (tf->command == ATA_CMD_DSM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	case ATA_PROT_NCQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		break;	/* continue below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	case ATA_PROT_PIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		mv_rw_multi_errata_sata24(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	/* Fill in command request block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	if (!(tf->flags & ATA_TFLAG_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		flags |= CRQB_FLAG_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	/* get current queue index from software */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	in_index = pp->req_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	pp->crqb[in_index].sg_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	pp->crqb[in_index].sg_addr_hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	cw = &pp->crqb[in_index].ata_cmd[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	/* Sadly, the CRQB cannot accommodate all registers--there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	 * only 11 bytes...so we must pick and choose required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	 * registers based on the command.  So, we drop feature and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	 * hob_feature for [RW] DMA commands, but they are needed for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	switch (tf->command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	case ATA_CMD_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	case ATA_CMD_READ_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	case ATA_CMD_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	case ATA_CMD_WRITE_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	case ATA_CMD_WRITE_FUA_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	case ATA_CMD_FPDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	case ATA_CMD_FPDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		/* The only other commands EDMA supports in non-queued and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		 * of which are defined/used by Linux.  If we get here, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		 * driver needs work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 				tf->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		return AC_ERR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	mv_fill_sg(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
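
/*
 * Note the ordering above: each 48-bit register pair is emitted
 * hob-first (e.g. hob_lbal before lbal), matching the order in which
 * an ATA device expects the "previous" and "current" taskfile bytes.
 */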
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)  *      mv_qc_prep_iie - Host specific command preparation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)  *      @qc: queued command to prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)  *      This routine simply redirects to the general purpose routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)  *      if command is not DMA.  Else, it handles prep of the Gen IIE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)  *      CRQB (command request block), does some sanity checking, and calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)  *      the SG load routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	struct mv_crqb_iie *crqb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	struct ata_taskfile *tf = &qc->tf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	unsigned in_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	if ((tf->protocol != ATA_PROT_DMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	    (tf->protocol != ATA_PROT_NCQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	if (tf->command == ATA_CMD_DSM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		return AC_ERR_OK;  /* use bmdma for this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	/* Fill in Gen IIE command request block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	if (!(tf->flags & ATA_TFLAG_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		flags |= CRQB_FLAG_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	/* get current queue index from software */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	in_index = pp->req_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	crqb->flags = cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	crqb->ata_cmd[0] = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 			(tf->command << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 			(tf->feature << 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	crqb->ata_cmd[1] = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 			(tf->lbal << 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 			(tf->lbam << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 			(tf->lbah << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 			(tf->device << 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	crqb->ata_cmd[2] = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 			(tf->hob_lbal << 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 			(tf->hob_lbam << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 			(tf->hob_lbah << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 			(tf->hob_feature << 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	crqb->ata_cmd[3] = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 			(tf->nsect << 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			(tf->hob_nsect << 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	mv_fill_sg(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	return AC_ERR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
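
/*
 * For reference, the Gen IIE CRQB built above packs the taskfile into
 * four little-endian words:
 *
 *	ata_cmd[0]: feature<<24     | command<<16
 *	ata_cmd[1]: device<<24      | lbah<<16     | lbam<<8     | lbal
 *	ata_cmd[2]: hob_feature<<24 | hob_lbah<<16 | hob_lbam<<8 | hob_lbal
 *	ata_cmd[3]: hob_nsect<<8    | nsect
 */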
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)  *	mv_sff_check_status - fetch device status, if valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  *	@ap: ATA port to fetch status from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)  *	When using command issue via mv_qc_issue_fis(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)  *	the initial ATA_BUSY state does not show up in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)  *	ATA status (shadow) register.  This can confuse libata!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)  *	So we have a hook here to fake ATA_BUSY for that situation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)  *	until the first time a BUSY, DRQ, or ERR bit is seen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)  *	The rest of the time, it simply returns the ATA status register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static u8 mv_sff_check_status(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	u8 stat = ioread8(ap->ioaddr.status_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 			stat = ATA_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	return stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
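
/*
 * MV_PP_FLAG_FAKE_ATA_BUSY is set when a command is issued through
 * mv_qc_issue_fis() (below), since that path never raises BUSY in the
 * shadow status register; the first genuine BUSY/DRQ/ERR observation
 * clears the flag again, as seen above.
 */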
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)  *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@ap: ATA port on which to send the FIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)  *	@fis: fis to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)  *	@nwords: number of 32-bit words in the fis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	u32 ifctl, old_ifctl, ifstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	int i, timeout = 200, final_word = nwords - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	/* Initiate FIS transmission mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	old_ifctl = readl(port_mmio + SATA_IFCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	ifctl = 0x100 | (old_ifctl & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	writelfl(ifctl, port_mmio + SATA_IFCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	/* Send all words of the FIS except for the final word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	for (i = 0; i < final_word; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	/* Flag end-of-transmission, and then send the final word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	 * Wait for FIS transmission to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	 * This typically takes just a single iteration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		ifstat = readl(port_mmio + SATA_IFSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	} while (!(ifstat & 0x1000) && --timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	/* Restore original port configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	writelfl(old_ifctl, port_mmio + SATA_IFCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	/* See if it worked: success is 0x1000 ("done") set with 0x2000 clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	if ((ifstat & 0x3000) != 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 			      __func__, ifstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		return AC_ERR_OTHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)  *	mv_qc_issue_fis - Issue a command directly as a FIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)  *	@qc: queued command to start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)  *	Note that the ATA shadow registers are not updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)  *	after command issue, so the device will appear "READY"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)  *	if polled, even while it is BUSY processing the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)  *	So we use a status hook to fake ATA_BUSY until the drive changes state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)  *	Note: we don't get updated shadow regs on *completion*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)  *	of non-data commands. So avoid sending them via this function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)  *	as they will appear to have completed immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)  *	GEN_IIE has special registers that we could get the result tf from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)  *	but earlier chipsets do not.  For now, we ignore those registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	struct ata_link *link = qc->dev->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	u32 fis[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
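	/*
	 * ata_tf_to_fis() builds a 20-byte host-to-device Register FIS,
	 * which is why fis[] above holds exactly 5 words; the third
	 * argument (1) sets the FIS "C" bit, marking a command rather
	 * than a device-control update.
	 */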
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	switch (qc->tf.protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	case ATAPI_PROT_PIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	case ATAPI_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		ap->hsm_task_state = HSM_ST_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	case ATA_PROT_PIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		if (qc->tf.flags & ATA_TFLAG_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 			ap->hsm_task_state = HSM_ST_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 			ap->hsm_task_state = HSM_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		ap->hsm_task_state = HSM_ST_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	}
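	/*
	 * The HSM states above follow libata's SFF state machine:
	 * HSM_ST_FIRST waits for the device to request the first data
	 * block (PIO writes, ATAPI CDBs), HSM_ST runs the PIO data-in
	 * phase, and HSM_ST_LAST simply awaits command completion.
	 */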
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	if (qc->tf.flags & ATA_TFLAG_POLLING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		ata_sff_queue_pio_task(link, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)  *      mv_qc_issue - Initiate a command to the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)  *      @qc: queued command to start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)  *      This routine simply redirects to the general-purpose routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)  *      if the command is not DMA.  Otherwise, it sanity-checks our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)  *      local caches of the request producer/consumer indices, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)  *      enables DMA and bumps the request producer index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	static int limit_warnings = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	u32 in_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	unsigned int port_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	switch (qc->tf.protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	case ATA_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		if (qc->tf.command == ATA_CMD_DSM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 				return AC_ERR_OTHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 			break;  /* use bmdma for this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	case ATA_PROT_NCQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
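		/*
		 * pp->req_idx is our software copy of the request queue's
		 * producer index; the MV_MAX_Q_DEPTH_MASK wrap above keeps
		 * it inside the circular queue.
		 */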
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		/* Write the request in pointer to kick the EDMA to life */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 					port_mmio + EDMA_REQ_Q_IN_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	case ATA_PROT_PIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		 * Someday, we might implement special polling workarounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		 * for these, but it all seems rather unnecessary since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		 * normally use only DMA for commands which transfer more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		 * than a single block of data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		 * Much of the time, this could just work regardless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		 * So for now, just log the incident, and allow the attempt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 			--limit_warnings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 			ata_link_warn(qc->dev->link, DRV_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 				      ": attempting PIO w/multiple DRQ: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 				      "this may fail due to h/w errata\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	case ATA_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	case ATAPI_PROT_PIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	case ATAPI_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		if (ap->flags & ATA_FLAG_PIO_POLLING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 			qc->tf.flags |= ATA_TFLAG_POLLING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	if (qc->tf.flags & ATA_TFLAG_POLLING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	 * We're about to send a non-EDMA capable command to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	 * port.  Turn off EDMA so there won't be problems accessing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	 * the shadow block and other registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	mv_stop_edma(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	mv_pmp_select(ap, qc->dev->link->pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 		struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		 * After any NCQ error, the READ_LOG_EXT command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		 * from libata-eh *must* use mv_qc_issue_fis().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		 * Otherwise it might fail, due to chip errata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		 * Rather than special-case it, we'll just *always*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		 * use this method here for READ_LOG_EXT, making for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		 * easier testing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 		if (IS_GEN_II(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 			return mv_qc_issue_fis(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	return ata_bmdma_qc_issue(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
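/*
 * With NCQ enabled there is no single "active" command, and polled
 * commands belong to the PIO task rather than the interrupt handler,
 * so mv_get_active_qc() returns NULL in both of those cases.
 */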
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		return qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) static void mv_pmp_error_handler(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	unsigned int pmp, pmp_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		 * Perform NCQ error analysis on failed PMPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		 * before we freeze the port entirely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		pmp_map = pp->delayed_eh_pmp_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
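		/*
		 * Walk pmp_map one bit at a time: each pass tests a PMP's
		 * bit and clears it, so the loop ends once the map empties.
		 */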
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		for (pmp = 0; pmp_map != 0; pmp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 			unsigned int this_pmp = (1 << pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 			if (pmp_map & this_pmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 				struct ata_link *link = &ap->pmp_link[pmp];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 				pmp_map &= ~this_pmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 				ata_eh_analyze_ncq_error(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	sata_pmp_error_handler(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 
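	/*
	 * The upper 16 bits of SATA_TESTCTL apparently latch a per-PMP
	 * error bitmap, one bit per port-multiplier link.
	 */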
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	return readl(port_mmio + SATA_TESTCTL) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	unsigned int pmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	 * Initialize EH info for PMPs which saw device errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	for (pmp = 0; pmp_map != 0; pmp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 		unsigned int this_pmp = (1 << pmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		if (pmp_map & this_pmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 			struct ata_link *link = &ap->pmp_link[pmp];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 			struct ata_eh_info *ehi = &link->eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 			pmp_map &= ~this_pmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 			ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 			ata_ehi_push_desc(ehi, "dev err");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 			ehi->err_mask |= AC_ERR_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 			ehi->action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 			ata_link_abort(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) static int mv_req_q_empty(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	u32 in_ptr, out_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	int failed_links;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	unsigned int old_map, new_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	 * Device error during FBS+NCQ operation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	 * Set a port flag to prevent further I/O being enqueued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	 * Leave the EDMA running to drain outstanding commands from this port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	 * Perform the post-mortem/EH only when all responses are complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 		pp->delayed_eh_pmp_map = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	old_map = pp->delayed_eh_pmp_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	new_map = old_map | mv_get_err_pmp_map(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	if (old_map != new_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		pp->delayed_eh_pmp_map = new_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 		mv_pmp_eh_prep(ap, new_map & ~old_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	failed_links = hweight16(new_map);
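	/* hweight16() is a population count: one set bit per failed link. */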
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	ata_port_info(ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		      "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		      __func__, pp->delayed_eh_pmp_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		      ap->qc_active, failed_links,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		      ap->nr_active_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		mv_process_crpb_entries(ap, pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		mv_stop_edma(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		mv_eh_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		ata_port_info(ap, "%s: done\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 		return 1;	/* handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	ata_port_info(ap, "%s: waiting\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	return 1;	/* handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	 * Possible future enhancement:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	 * FBS+non-NCQ operation is not yet implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	 * See related notes in mv_edma_cfg().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	 * Device error during FBS+non-NCQ operation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	 * We need to snapshot the shadow registers for each failed command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	return 0;	/* not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 		return 0;	/* EDMA was not active: not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		return 0;	/* FBS was not active: not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	if (!(edma_err_cause & EDMA_ERR_DEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		return 0;	/* non DEV error: not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
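	/* Ignore transient IRQ bits when testing for "other problems" below. */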
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 		return 0;	/* other problems: not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		 * EDMA should NOT have self-disabled for this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		 * If it did, then something is wrong elsewhere,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		 * and we cannot handle it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 				      __func__, edma_err_cause, pp->pp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 			return 0; /* not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 		return mv_handle_fbs_ncq_dev_err(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		 * EDMA should have self-disabled for this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		 * If it did not, then something is wrong elsewhere,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 		 * and we cannot handle it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 				      __func__, edma_err_cause, pp->pp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 			return 0; /* not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		return mv_handle_fbs_non_ncq_dev_err(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	struct ata_eh_info *ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	char *when = "idle";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	if (edma_was_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		when = "EDMA enabled";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 			when = "polling";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	ehi->err_mask |= AC_ERR_OTHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	ehi->action   |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)  *      mv_err_intr - Handle error interrupts on the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)  *      @ap: ATA channel to manipulate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)  *      Most cases require a full reset of the chip's state machine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)  *      which also performs a COMRESET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)  *      Also, if the port disabled DMA, update our cached copy to match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) static void mv_err_intr(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	u32 fis_cause = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	unsigned int action = 0, err_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	struct ata_eh_info *ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	int abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	 * Read and clear the SError and err_cause bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	sata_scr_read(&ap->link, SCR_ERROR, &serr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	if (edma_err_cause & EDMA_ERR_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 		 * Device errors during FIS-based switching operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		 * require special handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 		if (mv_handle_dev_err(ap, edma_err_cause))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	qc = mv_get_active_qc(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 			  edma_err_cause, pp->pp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		if (fis_cause & FIS_IRQ_CAUSE_AN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 			u32 ec = edma_err_cause &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 			sata_async_notification(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 			if (!ec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 				return; /* Just an AN; no need for the nukes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 			ata_ehi_push_desc(ehi, "SDB notify");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	 * All generations share these EDMA error cause bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	if (edma_err_cause & EDMA_ERR_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		err_mask |= AC_ERR_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 		action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		ata_ehi_push_desc(ehi, "dev error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 			EDMA_ERR_INTRL_PAR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		err_mask |= AC_ERR_ATA_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		ata_ehi_push_desc(ehi, "parity error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		ata_ehi_hotplugged(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			"dev disconnect" : "dev connect");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	 * Gen-I has a different SELF_DIS bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	 * different FREEZE bits, and no SERR bit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	if (IS_GEN_I(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		eh_freeze_mask = EDMA_EH_FREEZE_5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 			ata_ehi_push_desc(ehi, "EDMA self-disable");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		eh_freeze_mask = EDMA_EH_FREEZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 			ata_ehi_push_desc(ehi, "EDMA self-disable");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 		if (edma_err_cause & EDMA_ERR_SERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 			ata_ehi_push_desc(ehi, "SError=%08x", serr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 			err_mask |= AC_ERR_ATA_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 			action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	if (!err_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		err_mask = AC_ERR_OTHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 		action |= ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	ehi->serror |= serr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	ehi->action |= action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	if (qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		qc->err_mask |= err_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		ehi->err_mask |= err_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	if (err_mask == AC_ERR_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		 * Cannot do ata_port_freeze() here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		 * because it would kill PIO access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 		 * which is needed for further diagnosis.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		mv_eh_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	} else if (edma_err_cause & eh_freeze_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		 * Note to self: ata_port_freeze() calls ata_port_abort()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	if (abort) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		if (qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 			ata_link_abort(qc->dev->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 			ata_port_abort(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) static bool mv_process_crpb_response(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	u8 ata_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	u16 edma_status = le16_to_cpu(response->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	 * edma_status from a response queue entry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	 *   MSB is saved ATA status from command completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	if (!ncq_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 		if (err_cause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 			 * Error will be seen/handled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 			 * mv_err_intr().  So do nothing at all here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	if (!ac_err_mask(ata_status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	/* else: leave it for mv_err_intr() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	u32 in_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	bool work_done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	u32 done_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	/* Get the hardware queue position index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	/* Process new responses since the last time we looked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	while (in_index != pp->resp_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		unsigned int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		if (IS_GEN_I(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 			/* 50xx: no NCQ, only one command active at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 			tag = ap->link.active_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 			/* Gen II/IIE: get command tag from CRPB entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 			tag = le16_to_cpu(response->id) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 			done_mask |= 1 << tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		work_done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	if (work_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
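		/*
		 * ata_qc_complete_multiple() expects the *new* set of
		 * still-active tags, hence XOR-ing the completed tags out
		 * of the current active mask.
		 */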
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 		/* Update the software queue position index in hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 			 port_mmio + EDMA_RSP_Q_OUT_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) static void mv_port_intr(struct ata_port *ap, u32 port_cause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	struct mv_port_priv *pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	int edma_was_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	 * Grab a snapshot of the EDMA_EN flag setting,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	 * so that we have a consistent view for this port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	 * even if one of the routines we call changes it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	 * Process completed CRPB response(s) before other events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 		mv_process_crpb_entries(ap, pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 			mv_handle_fbs_ncq_dev_err(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	 * Handle chip-reported errors, or continue on to handle PIO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	if (unlikely(port_cause & ERR_IRQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		mv_err_intr(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	} else if (!edma_was_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		if (qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 			ata_bmdma_port_intr(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 			mv_unexpected_intr(ap, edma_was_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)  *      mv_host_intr - Handle all interrupts on the given host controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)  *      @host: host specific structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)  *      @main_irq_cause: Main interrupt cause register for the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	void __iomem *mmio = hpriv->base, *hc_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	unsigned int handled = 0, port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	/* If asserted, clear the "all ports" IRQ coalescing bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	if (main_irq_cause & ALL_PORTS_COAL_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	for (port = 0; port < hpriv->n_ports; port++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 		struct ata_port *ap = host->ports[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 		unsigned int p, shift, hardport, port_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
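		/*
		 * Each host controller (hc) drives up to MV_PORTS_PER_HC
		 * ports: "hardport" is this port's index within its hc,
		 * and "shift" locates the port's bits in main_irq_cause.
		 */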
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		 * Each hc within the host has its own hc_irq_cause register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		 * where the interrupting ports' bits get ack'd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 		if (hardport == 0) {	/* first port on this hc ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 			u32 port_mask, ack_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 			 * Skip this entire hc if nothing pending for any ports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 			if (!hc_cause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 				port += MV_PORTS_PER_HC - 1;
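				/* together with the loop's port++, this skips the whole hc */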
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 			 * We don't need/want to read the hc_irq_cause register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 			 * because doing so hurts performance, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 			 * main_irq_cause already gives us everything we need.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 			 * But we do have to *write* to the hc_irq_cause to ack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 			 * the ports that we are handling this time through.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 			 * This requires that we create a bitmap for those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 			 * ports which interrupted us, and use that bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 			 * to ack (only) those ports via hc_irq_cause.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 			ack_irqs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 			if (hc_cause & PORTS_0_3_COAL_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 				ack_irqs = HC_COAL_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 				if ((port + p) >= hpriv->n_ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 				if (hc_cause & port_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 			hc_mmio = mv_hc_base_from_port(mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
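			/*
			 * The hc cause register is evidently write-0-to-clear:
			 * writing ~ack_irqs acks only the bits chosen above
			 * and leaves all other bits untouched.
			 */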
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 			handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 		 * Handle interrupts signalled for this port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 		if (port_cause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 			mv_port_intr(ap, port_cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	return handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	struct ata_port *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	struct ata_eh_info *ehi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	unsigned int i, err_mask, printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	u32 err_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	err_cause = readl(mmio + hpriv->irq_cause_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	DPRINTK("All regs @ PCI error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	writelfl(0, mmio + hpriv->irq_cause_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	for (i = 0; i < host->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 		ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 		if (!ata_link_offline(&ap->link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 			ehi = &ap->link.eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 			ata_ehi_clear_desc(ehi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 			if (!printed++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 				ata_ehi_push_desc(ehi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 					"PCI err cause 0x%08x", err_cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 			err_mask = AC_ERR_HOST_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 			ehi->action = ATA_EH_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 			if (qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 				qc->err_mask |= err_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 				ehi->err_mask |= err_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 			ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	return 1;	/* handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)  *      mv_interrupt - Main interrupt event handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)  *      @irq: unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)  *      @dev_instance: private data; in this case the host structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)  *      Read the read-only register to determine if any host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)  *      controllers have pending interrupts.  If so, call lower level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)  *      routine to handle.  Also check for PCI errors which are only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)  *      reported here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)  *      This routine holds the host lock while processing pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)  *      interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) static irqreturn_t mv_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 	struct ata_host *host = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	u32 main_irq_cause, pending_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	spin_lock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	/* for MSI:  block new interrupts while in here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	if (using_msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 		mv_write_main_irq_mask(0, hpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	main_irq_cause = readl(hpriv->main_irq_cause_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	 * Deal with cases where we either have nothing pending, or have read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	 * a bogus register value which can indicate HW removal or PCI fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	if (pending_irqs && main_irq_cause != 0xffffffffU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 			handled = mv_pci_error(host, hpriv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 			handled = mv_host_intr(host, pending_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	/* for MSI: unmask; interrupt cause bits will retrigger now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	if (using_msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	spin_unlock(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	unsigned int ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	switch (sc_reg_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	case SCR_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	case SCR_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	case SCR_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 		ofs = sc_reg_in * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 		ofs = 0xffffffffU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	return ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) }
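/*
 * On the Gen-I (50xx) chips the SCR registers sit back to back at the
 * per-port PHY base, so the offset is just the SCR index scaled by the
 * register width (illustrative):
 *
 *	SCR_STATUS  (0) -> 0x0
 *	SCR_ERROR   (1) -> 0x4
 *	SCR_CONTROL (2) -> 0x8
 *
 * Anything else is rejected by mv5_scr_read()/mv5_scr_write() below.
 */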
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	struct mv_host_priv *hpriv = link->ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	void __iomem *mmio = hpriv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	if (ofs != 0xffffffffU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 		*val = readl(addr + ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	struct mv_host_priv *hpriv = link->ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	void __iomem *mmio = hpriv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	if (ofs != 0xffffffffU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 		writelfl(val, addr + ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	struct pci_dev *pdev = to_pci_dev(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	int early_5080;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	if (!early_5080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 		tmp |= (1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	mv_reset_pci_bus(host, mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	writel(0x0fcfffff, mmio + FLASH_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 			   void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	tmp = readl(phy_mmio + MV5_PHY_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	writel(0, mmio + GPIO_PORT_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
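	/*
	 * FIXME: the OR below sets every bit *except* bit 0, which looks
	 * suspicious; "tmp &= ~(1 << 0)" (clear only the ROM BAR enable
	 * bit) may have been intended.
	 */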
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 	tmp |= ~(1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 			   unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	if (fix_apm_sq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 		tmp = readl(phy_mmio + MV5_LTMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		tmp |= (1 << 19);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		writel(tmp, phy_mmio + MV5_LTMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 		tmp = readl(phy_mmio + MV5_PHY_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		tmp &= ~0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		tmp |= 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		writel(tmp, phy_mmio + MV5_PHY_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	tmp = readl(phy_mmio + MV5_PHY_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	tmp &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	tmp |= hpriv->signal[port].pre;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	tmp |= hpriv->signal[port].amps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	writel(tmp, phy_mmio + MV5_PHY_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) #undef ZERO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) #define ZERO(reg) writel(0, port_mmio + (reg))
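/*
 * ZERO() is a local shorthand, #undef'd and redefined before each group
 * of reset helpers for whichever register window is in scope (port_mmio
 * here, hc_mmio and the chip base below).  mv5_reset_hc_port() uses it
 * to clear the per-port EDMA queue pointers and error-IRQ state after
 * resetting the channel.
 */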
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 			     unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	void __iomem *port_mmio = mv_port_base(mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	mv_reset_channel(hpriv, mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	ZERO(0x028);	/* command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	writel(0x11f, port_mmio + EDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	ZERO(0x004);	/* timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	ZERO(0x008);	/* irq err cause */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	ZERO(0x00c);	/* irq err mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	ZERO(0x010);	/* rq bah */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	ZERO(0x014);	/* rq inp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	ZERO(0x018);	/* rq outp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	ZERO(0x01c);	/* respq bah */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	ZERO(0x024);	/* respq outp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	ZERO(0x020);	/* respq inp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	ZERO(0x02c);	/* test control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) #undef ZERO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) #define ZERO(reg) writel(0, hc_mmio + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 			unsigned int hc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	ZERO(0x00c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	ZERO(0x010);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	ZERO(0x014);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 	ZERO(0x018);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	tmp = readl(hc_mmio + 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	tmp &= 0x1c1c1c1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	tmp |= 0x03030303;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	writel(tmp, hc_mmio + 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) #undef ZERO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 			unsigned int n_hc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	unsigned int hc, port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 	for (hc = 0; hc < n_hc; hc++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 		for (port = 0; port < MV_PORTS_PER_HC; port++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 			mv5_reset_hc_port(hpriv, mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 					  (hc * MV_PORTS_PER_HC) + port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 		mv5_reset_one_hc(hpriv, mmio, hc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) #undef ZERO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) #define ZERO(reg) writel(0, mmio + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	tmp = readl(mmio + MV_PCI_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	tmp &= 0xff00ffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	writel(tmp, mmio + MV_PCI_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	ZERO(MV_PCI_DISC_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	ZERO(MV_PCI_MSI_TRIGGER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 	ZERO(MV_PCI_SERR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	ZERO(hpriv->irq_cause_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	ZERO(hpriv->irq_mask_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	ZERO(MV_PCI_ERR_ATTRIBUTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	ZERO(MV_PCI_ERR_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) #undef ZERO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	mv5_reset_flash(hpriv, mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 
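	/* keep only bits 1:0, then set bits 6:5 -- the same GPIO bits that
	 * mv6_enable_leds() writes below */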
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	tmp = readl(mmio + GPIO_PORT_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	tmp &= 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	tmp |= (1 << 5) | (1 << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	writel(tmp, mmio + GPIO_PORT_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)  *      mv6_reset_hc - Perform the 6xxx global soft reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)  *      @mmio: base address of the HBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)  *      This routine only applies to 6xxx parts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 			unsigned int n_hc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	int i, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	u32 t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 	/* Follow the procedure defined in the PCI "main command and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	 * status register" table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 	t = readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 	writel(t | STOP_PCI_MASTER, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	for (i = 0; i < 1000; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 		t = readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		if (PCI_MASTER_EMPTY & t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	if (!(PCI_MASTER_EMPTY & t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	/* set reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	i = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 		writel(t | GLOB_SFT_RST, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		t = readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	if (!(GLOB_SFT_RST & t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 		rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	i = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 		t = readl(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	if (GLOB_SFT_RST & t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 			   void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	void __iomem *port_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	tmp = readl(mmio + RESET_CFG);
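	/* bit 0 clear: apparently no PHY settings were strapped/latched at
	 * reset, so fall back to fixed amplitude / pre-emphasis defaults */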
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	if ((tmp & (1 << 0)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		hpriv->signal[idx].amps = 0x7 << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 		hpriv->signal[idx].pre = 0x1 << 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	port_mmio = mv_port_base(mmio, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	tmp = readl(port_mmio + PHY_MODE2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	writel(0x00000060, mmio + GPIO_PORT_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 			   unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 	void __iomem *port_mmio = mv_port_base(mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	u32 hp_flags = hpriv->hp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 	int fix_phy_mode2 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	int fix_phy_mode4 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	u32 m2, m3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	if (fix_phy_mode2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 		m2 = readl(port_mmio + PHY_MODE2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		m2 &= ~(1 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 		m2 |= (1 << 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		writel(m2, port_mmio + PHY_MODE2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 		udelay(200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		m2 = readl(port_mmio + PHY_MODE2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 		m2 &= ~((1 << 16) | (1 << 31));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 		writel(m2, port_mmio + PHY_MODE2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		udelay(200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	 * Gen-II/IIe PHY_MODE3 errata RM#2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 	 * Achieves better receiver noise performance than the h/w default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 	m3 = readl(port_mmio + PHY_MODE3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	m3 = (m3 & 0x1f) | (0x5555601 << 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	/* Guideline 88F5182 (GL# SATA-S11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	if (IS_SOC(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 		m3 &= ~0x1c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	if (fix_phy_mode4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 		u32 m4 = readl(port_mmio + PHY_MODE4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 		 * Enforce reserved-bit restrictions on GenIIe devices only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 		 * For earlier chipsets, force only the internal config field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 		 *  (workaround for errata FEr SATA#10 part 1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 		if (IS_GEN_IIE(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		writel(m4, port_mmio + PHY_MODE4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	 * Workaround for 60x1-B2 errata SATA#13:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	 * Or ensure we use writelfl() when writing PHY_MODE4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 	writel(m3, port_mmio + PHY_MODE3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	/* Revert values of pre-emphasis and signal amps to the saved ones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	m2 = readl(port_mmio + PHY_MODE2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	m2 &= ~MV_M2_PREAMP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	m2 |= hpriv->signal[port].amps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	m2 |= hpriv->signal[port].pre;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	m2 &= ~(1 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	/* according to mvSata 3.6.1, some IIE values are fixed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 	if (IS_GEN_IIE(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 		m2 &= ~0xC30FF01F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 		m2 |= 0x0000900F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	writel(m2, port_mmio + PHY_MODE2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) /* TODO: use the generic LED interface to configure the SATA Presence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) /* & Activity LEDs on the board */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 				      void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 			   void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 	void __iomem *port_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	port_mmio = mv_port_base(mmio, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 	tmp = readl(port_mmio + PHY_MODE2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) #undef ZERO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) #define ZERO(reg) writel(0, port_mmio + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 					void __iomem *mmio, unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 	void __iomem *port_mmio = mv_port_base(mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	mv_reset_channel(hpriv, mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 	ZERO(0x028);		/* command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	writel(0x101f, port_mmio + EDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	ZERO(0x004);		/* timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 	ZERO(0x008);		/* irq err cause */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	ZERO(0x00c);		/* irq err mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	ZERO(0x010);		/* rq bah */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 	ZERO(0x014);		/* rq inp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	ZERO(0x018);		/* rq outp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	ZERO(0x01c);		/* respq bah */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	ZERO(0x024);		/* respq outp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	ZERO(0x020);		/* respq inp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 	ZERO(0x02c);		/* test control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) #undef ZERO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) #define ZERO(reg) writel(0, hc_mmio + (reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 				       void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	ZERO(0x00c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	ZERO(0x010);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	ZERO(0x014);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) #undef ZERO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 				  void __iomem *mmio, unsigned int n_hc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	unsigned int port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 	for (port = 0; port < hpriv->n_ports; port++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 		mv_soc_reset_hc_port(hpriv, mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 	mv_soc_reset_one_hc(hpriv, mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 				      void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) }
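/*
 * The SoC variants have nothing to do for the two hooks above: there is
 * presumably no flash control register or PCI bus to poke on these
 * parts, so both are deliberately empty stubs.
 */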
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 				  void __iomem *mmio, unsigned int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	void __iomem *port_mmio = mv_port_base(mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	u32	reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	reg = readl(port_mmio + PHY_MODE3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	reg |= (0x1 << 27);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	reg |= (0x1 << 29);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	writel(reg, port_mmio + PHY_MODE3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	reg = readl(port_mmio + PHY_MODE4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	reg |= (0x1 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	writel(reg, port_mmio + PHY_MODE4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 	reg = readl(port_mmio + PHY_MODE9_GEN2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	reg |= 0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	writel(reg, port_mmio + PHY_MODE9_GEN2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	reg = readl(port_mmio + PHY_MODE9_GEN1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	reg |= 0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	writel(reg, port_mmio + PHY_MODE9_GEN1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)  *	soc_is_65n - check if the SoC is a 65 nm device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536)  *	Detect the SoC type by reading the PHYCFG_OFS register: the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537)  *	register exists only on the 65 nm devices and reads back non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)  *	there, while on older devices the read returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) static bool soc_is_65n(struct mv_host_priv *hpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	if (readl(port0_mmio + PHYCFG_OFS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 	u32 ifcfg = readl(port_mmio + SATA_IFCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	if (want_gen2i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 		ifcfg |= (1 << 7);		/* enable gen2i speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	writelfl(ifcfg, port_mmio + SATA_IFCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) }
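
/*
 * Note how mv_setup_ifcfg() is used below: mv_reset_channel() enables
 * gen2i (3.0 Gb/s) on everything newer than Gen-I, while mv_hardreset()
 * passes want_gen2i == 0 to force 1.5 Gb/s when repeated COMRESET
 * attempts keep failing at the higher speed.
 */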
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 			     unsigned int port_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	 * The datasheet warns against setting EDMA_RESET when EDMA is active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	 * (but doesn't say what the problem might be).  So we first try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 	 * to disable the EDMA engine before doing the EDMA_RESET operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	mv_stop_edma_engine(port_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	if (!IS_GEN_I(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 		/* Enable 3.0 Gb/s link speed: this survives EDMA_RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 		mv_setup_ifcfg(port_mmio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	 * link, and physical layers.  It resets all SATA interface registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	udelay(25);	/* allow reset propagation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	writelfl(0, port_mmio + EDMA_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 
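	/* Gen-I parts seem to need extra time for the COMRESET to settle */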
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	if (IS_GEN_I(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 		usleep_range(500, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 
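/*
 * With a port multiplier attached, the chip addresses a specific PM
 * device via the low nibble of SATA_IFCTL; mv_pmp_select() updates it
 * only when it actually changes, and the reset helpers below call it
 * before issuing soft/hard resets through the selected device.
 */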
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) static void mv_pmp_select(struct ata_port *ap, int pmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	if (sata_pmp_supported(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 		void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 		u32 reg = readl(port_mmio + SATA_IFCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 		int old = reg & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 		if (old != pmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 			reg = (reg & ~0xf) | pmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 			writelfl(reg, port_mmio + SATA_IFCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 				unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	mv_pmp_select(link->ap, sata_srst_pmp(link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	return sata_std_hardreset(link, class, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) static int mv_softreset(struct ata_link *link, unsigned int *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 				unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	mv_pmp_select(link->ap, sata_srst_pmp(link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	return ata_sff_softreset(link, class, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) static int mv_hardreset(struct ata_link *link, unsigned int *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 			unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	struct ata_port *ap = link->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	struct mv_port_priv *pp = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	void __iomem *mmio = hpriv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 	int rc, attempts = 0, extra = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	u32 sstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 	bool online;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	mv_reset_channel(hpriv, mmio, ap->port_no);
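	/* the channel reset just invalidated any cached EDMA/NCQ/FBS state */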
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	pp->pp_flags &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 	/* Workaround for errata FEr SATA#10 (part 2) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 		const unsigned long *timing =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 				sata_ehc_deb_timing(&link->eh_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		rc = sata_link_hardreset(link, timing, deadline + extra,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 					 &online, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 		rc = online ? -EAGAIN : rc;
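		/* link came up: -EAGAIN requests a follow-up softreset from EH */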
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 		sata_scr_read(link, SCR_STATUS, &sstatus);
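		/*
		 * SStatus layout: DET in bits 3:0, SPD in 7:4, IPM in 11:8.
		 * 0x121 means the PHY detected a device at gen2 speed but
		 * communication was never established (DET=1), hence the
		 * fallback below.  The loop exits on 0x0 (nothing there) or
		 * DET=3 at either speed (0x113 / 0x123).
		 */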
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 			/* Force 1.5 Gb/s link speed and try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 			mv_setup_ifcfg(mv_ap_base(ap), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 			if (time_after(jiffies + HZ, deadline))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 				extra = HZ; /* only extend it once, max */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	mv_save_cached_regs(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	mv_edma_cfg(ap, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) static void mv_eh_freeze(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	mv_stop_edma(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 	mv_enable_port_irqs(ap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) static void mv_eh_thaw(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	struct mv_host_priv *hpriv = ap->host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	unsigned int port = ap->port_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	unsigned int hardport = mv_hardport_from_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	void __iomem *port_mmio = mv_ap_base(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	u32 hc_irq_cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	/* clear EDMA errors on this port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 	/* clear pending irq events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
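	/*
	 * HC_IRQ_CAUSE bits are cleared by writing 0s, so writing the
	 * complement acks only this port's DEV/DMA causes and leaves the
	 * other ports' bits untouched.
	 */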
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 	mv_enable_port_irqs(ap, ERR_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685)  *      mv_port_init - Perform some early initialization on a single port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)  *      @port: libata data structure storing shadow register addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687)  *      @port_mmio: base address of the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689)  *      Initialize shadow register mmio addresses, clear outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690)  *      interrupts on the port, and unmask interrupts for the future
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691)  *      start of the port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 	/* PIO related setup: the shadow taskfile registers appear as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	 * consecutive u32s in the SHD_BLK window, one per ATA register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 	port->error_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 	port->status_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	/* special case: control/altstatus doesn't have ATA_REG_ address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 	/* Clear any currently outstanding port interrupt conditions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	serr = port_mmio + mv_scr_offset(SCR_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	writelfl(readl(serr), serr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 	/* unmask all non-transient EDMA error interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 		readl(port_mmio + EDMA_CFG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 		readl(port_mmio + EDMA_ERR_IRQ_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) static unsigned int mv_in_pcix_mode(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	void __iomem *mmio = hpriv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 		return 0;	/* not PCI-X capable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	reg = readl(mmio + MV_PCI_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 	if ((reg & MV_PCI_MODE_MASK) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 		return 0;	/* conventional PCI mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	return 1;	/* chip is in PCI-X mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) static int mv_pci_cut_through_okay(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 	void __iomem *mmio = hpriv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 	u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 	if (!mv_in_pcix_mode(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 		reg = readl(mmio + MV_PCI_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 		if (reg & MV_PCI_COMMAND_MRDTRIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 			return 0; /* not okay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 	return 1; /* okay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) static void mv_60x1b2_errata_pci7(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 	void __iomem *mmio = hpriv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 	/* workaround for 60x1-B2 errata PCI#7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	if (mv_in_pcix_mode(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 		u32 reg = readl(mmio + MV_PCI_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 	struct pci_dev *pdev = to_pci_dev(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 	u32 hp_flags = hpriv->hp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	switch (board_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 	case chip_5080:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 		hpriv->ops = &mv5xxx_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 		hp_flags |= MV_HP_GEN_I;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 		switch (pdev->revision) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 		case 0x1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 			hp_flags |= MV_HP_ERRATA_50XXB0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		case 0x3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 			hp_flags |= MV_HP_ERRATA_50XXB2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 			dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 				 "Applying 50XXB2 workarounds to unknown rev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 			hp_flags |= MV_HP_ERRATA_50XXB2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 	case chip_504x:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	case chip_508x:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 		hpriv->ops = &mv5xxx_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 		hp_flags |= MV_HP_GEN_I;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 		switch (pdev->revision) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 		case 0x0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 			hp_flags |= MV_HP_ERRATA_50XXB0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 		case 0x3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 			hp_flags |= MV_HP_ERRATA_50XXB2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 			dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 				 "Applying B2 workarounds to unknown rev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 			hp_flags |= MV_HP_ERRATA_50XXB2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	case chip_604x:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 	case chip_608x:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 		hpriv->ops = &mv6xxx_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 		hp_flags |= MV_HP_GEN_II;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 		switch (pdev->revision) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 		case 0x7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 			mv_60x1b2_errata_pci7(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 			hp_flags |= MV_HP_ERRATA_60X1B2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 		case 0x9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 			hp_flags |= MV_HP_ERRATA_60X1C0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 			dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 				 "Applying B2 workarounds to unknown rev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 			hp_flags |= MV_HP_ERRATA_60X1B2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 	case chip_7042:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 		    (pdev->device == 0x2300 || pdev->device == 0x2310)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 			 * Highpoint RocketRAID PCIe 23xx series cards:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 			 * Unconfigured drives are treated as "Legacy"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 			 * by the BIOS, and it overwrites sector 8 with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 			 * a "Lgcy" metadata block prior to Linux boot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 			 * Configured drives (RAID or JBOD) leave sector 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 			 * alone, but instead overwrite a high numbered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 			 * sector for the RAID metadata.  This sector can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 			 * be determined exactly by rounding the drive's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 			 * capacity down to a 512 MiB boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
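			 * (e.g. a hypothetical 976773168-sector drive keeps
			 * its metadata at 976773168 & ~0xfffff == 976224256)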
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 			 * Warn the user, lest they think we're just buggy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 				" BIOS CORRUPTS DATA on all attached drives,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 				" regardless of if/how they are configured."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 				" BEWARE!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 				" use sectors 8-9 on \"Legacy\" drives,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 				" and avoid the final two gigabytes on"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 				" all RocketRAID BIOS initialized drives.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	case chip_6042:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 		hpriv->ops = &mv6xxx_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 		hp_flags |= MV_HP_GEN_IIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 			hp_flags |= MV_HP_CUT_THROUGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 		switch (pdev->revision) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 		case 0x2: /* Rev.B0: the first/only public release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 			hp_flags |= MV_HP_ERRATA_60X1C0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 			dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 				 "Applying 60X1C0 workarounds to unknown rev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 			hp_flags |= MV_HP_ERRATA_60X1C0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 	case chip_soc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 		if (soc_is_65n(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 			hpriv->ops = &mv_soc_65n_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 			hpriv->ops = &mv_soc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 			MV_HP_ERRATA_60X1C0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 		dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 	hpriv->hp_flags = hp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 	if (hp_flags & MV_HP_PCIE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)  *      mv_init_host - Perform some early initialization of the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915)  *      @host: ATA host to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)  *      If possible, do an early global reset of the host.  Then do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918)  *      our port init and clear/unmask all/relevant host interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) static int mv_init_host(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	int rc = 0, n_hc, port, hc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 	void __iomem *mmio = hpriv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 	rc = mv_chip_id(host, hpriv->board_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 	if (IS_SOC(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	/* initialize shadow irq mask with register's value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	/* global interrupt mask: 0 == mask everything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 	mv_set_main_irq_mask(host, ~0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 	n_hc = mv_get_hc_count(host->ports[0]->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	for (port = 0; port < host->n_ports; port++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 		if (hpriv->ops->read_preamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 			hpriv->ops->read_preamp(hpriv, port, mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 	hpriv->ops->reset_flash(hpriv, mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	hpriv->ops->reset_bus(host, mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 	hpriv->ops->enable_leds(hpriv, mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 	for (port = 0; port < host->n_ports; port++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		struct ata_port *ap = host->ports[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 		void __iomem *port_mmio = mv_port_base(mmio, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 		mv_port_init(&ap->ioaddr, port_mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 	for (hc = 0; hc < n_hc; hc++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 			"(before clear)=0x%08x\n", hc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 			readl(hc_mmio + HC_CFG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 			readl(hc_mmio + HC_IRQ_CAUSE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 		/* Clear any currently outstanding hc interrupt conditions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	if (!IS_SOC(hpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 		/* Clear any currently outstanding host interrupt conditions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 		writelfl(0, mmio + hpriv->irq_cause_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 		/* and unmask interrupt generation for host regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 	 * enable only global host interrupts for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	 * The per-port interrupts get done later as ports are set up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 	mv_set_main_irq_mask(host, 0, PCI_ERR);
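	/*
	 * Apply the boot-time IRQ coalescing thresholds.  A sketch of
	 * tuning them, assuming the irq_coalescing_* variables defined
	 * earlier in this file are module parameters (as their names
	 * suggest):
	 *
	 *	modprobe sata_mv irq_coalescing_io_count=4 \
	 *			 irq_coalescing_usecs=100
	 */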
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	mv_set_irq_coalescing(host, irq_coalescing_io_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 				    irq_coalescing_usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 
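/**
 *      mv_create_dma_pools - create the driver's managed DMA pools
 *      @hpriv: host private data that receives the pool pointers
 *      @dev: device to allocate the pools against
 *
 *      Creates pools for the command request (CRQB) and response (CRPB)
 *      queues plus the scatter/gather tables.  The dmam_ variants are
 *      devres-managed, which is why neither these error paths nor the
 *      callers ever destroy the pools explicitly.
 *
 *      LOCKING:
 *      Inherited from caller.
 */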
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 							     MV_CRQB_Q_SZ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 	if (!hpriv->crqb_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 							     MV_CRPB_Q_SZ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 	if (!hpriv->crpb_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 							     MV_SG_TBL_SZ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 	if (!hpriv->sg_tbl_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 
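/**
 *      mv_conf_mbus_windows - program the SATA MBUS address windows
 *      @hpriv: host private data (for the register base)
 *      @dram: MBUS DRAM target info describing each chip-select
 *
 *      All four windows are disabled first, then one window is set up
 *      per DRAM chip-select.  As programmed below, WINDOW_CTRL packs:
 *
 *        bits 31:16  (size - 1), i.e. 64 KiB granularity
 *        bits 15:8   MBUS attribute for this chip-select
 *        bits 7:4    MBUS DRAM target id
 *        bit  0      window enable
 *
 *      LOCKING:
 *      Inherited from caller.
 */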
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 				 const struct mbus_dram_target_info *dram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 	for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 		writel(0, hpriv->base + WINDOW_CTRL(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 		writel(0, hpriv->base + WINDOW_BASE(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	for (i = 0; i < dram->num_cs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 		const struct mbus_dram_window *cs = dram->cs + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 		writel(((cs->size - 1) & 0xffff0000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 			(cs->mbus_attr << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 			(dram->mbus_dram_target_id << 4) | 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 			hpriv->base + WINDOW_CTRL(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 		writel(cs->base, hpriv->base + WINDOW_BASE(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)  *      mv_platform_probe - handle a positive probe of an SoC Marvell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042)  *      host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043)  *      @pdev: platform device found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) static int mv_platform_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	const struct mv_sata_platform_data *mv_platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 	const struct mbus_dram_target_info *dram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 	const struct ata_port_info *ppi[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 	    { &mv_port_info[chip_soc], NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 	struct ata_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 	struct mv_host_priv *hpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 	int n_ports = 0, irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 	int port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 	ata_print_version_once(&pdev->dev, DRV_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 	 * Simple resource validation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	if (unlikely(pdev->num_resources != 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 		dev_err(&pdev->dev, "invalid number of resources\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	 * Get the register base first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 	if (res == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 	/* allocate host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 	if (pdev->dev.of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 		rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 					   &n_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 				"error parsing nr-ports property: %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 		if (n_ports <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 			dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 				n_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 		mv_platform_data = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 		n_ports = mv_platform_data->n_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 		irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 	if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 		return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 	if (!irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 	if (!host || !hpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 	hpriv->port_clks = devm_kcalloc(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 					n_ports, sizeof(struct clk *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 	if (!hpriv->port_clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 	hpriv->port_phys = devm_kcalloc(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 					n_ports, sizeof(struct phy *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 	if (!hpriv->port_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 	host->private_data = hpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 	hpriv->board_idx = chip_soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 	host->iomap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 				   resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	if (!hpriv->base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 
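	/*
	 * The "reg" resource covers the SATAHC0 register block, while the
	 * offsets used throughout this driver are relative to the start of
	 * the chip's SATA register space, so bias the base pointer down.
	 */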
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 	hpriv->base -= SATAHC0_REG_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 	hpriv->clk = clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 	if (IS_ERR(hpriv->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 		clk_prepare_enable(hpriv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 	for (port = 0; port < n_ports; port++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 		char port_number[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 		sprintf(port_number, "%d", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 		if (!IS_ERR(hpriv->port_clks[port]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 			clk_prepare_enable(hpriv->port_clks[port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 		sprintf(port_number, "port%d", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 							       port_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 		if (IS_ERR(hpriv->port_phys[port])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 			rc = PTR_ERR(hpriv->port_phys[port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 			hpriv->port_phys[port] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 			if (rc != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 				dev_warn(&pdev->dev, "error getting phy %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 			/* Cleanup only the initialized ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 			hpriv->n_ports = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 			phy_power_on(hpriv->port_phys[port]);
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	/* All the ports have been initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	hpriv->n_ports = n_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 	 * (Re-)program MBUS remapping windows if we are asked to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 	dram = mv_mbus_dram_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 	if (dram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 		mv_conf_mbus_windows(hpriv, dram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 	 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 	 * updated in the LP_PHY_CTL register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 	if (pdev->dev.of_node &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 		of_device_is_compatible(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 					"marvell,armada-370-sata"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 		hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 	/* initialize adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 	rc = mv_init_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 	dev_info(&pdev->dev, "slots %u ports %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 	if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 	if (!IS_ERR(hpriv->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 		clk_disable_unprepare(hpriv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 		clk_put(hpriv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 	for (port = 0; port < hpriv->n_ports; port++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 		if (!IS_ERR(hpriv->port_clks[port])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 			clk_disable_unprepare(hpriv->port_clks[port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 			clk_put(hpriv->port_clks[port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 		phy_power_off(hpriv->port_phys[port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)  *      mv_platform_remove - unplug a platform interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214)  *      @pdev: platform device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216)  *      A platform bus SATA device has been unplugged. Perform the needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217)  *      cleanup. Also called on module unload for any active devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) static int mv_platform_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 	struct ata_host *host = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 	int port;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 	ata_host_detach(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 	if (!IS_ERR(hpriv->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 		clk_disable_unprepare(hpriv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 		clk_put(hpriv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	for (port = 0; port < host->n_ports; port++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 		if (!IS_ERR(hpriv->port_clks[port])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 			clk_disable_unprepare(hpriv->port_clks[port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 			clk_put(hpriv->port_clks[port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 		phy_power_off(hpriv->port_phys[port]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 
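/*
 * Platform power management: suspend simply delegates to the ATA core,
 * while resume reprograms the MBUS windows and redoes the full host
 * init, since the controller's register state cannot be assumed to
 * survive a suspend/resume cycle.
 */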
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 	struct ata_host *host = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 	if (host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 		return ata_host_suspend(host, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) static int mv_platform_resume(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 	struct ata_host *host = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 	const struct mbus_dram_target_info *dram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 	if (host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 		struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 		 * (Re-)program MBUS remapping windows if we are asked to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 		dram = mv_mbus_dram_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 		if (dram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 			mv_conf_mbus_windows(hpriv, dram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 		/* initialize adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 		ret = mv_init_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 			printk(KERN_ERR DRV_NAME ": Error during HW init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 		ata_host_resume(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) #define mv_platform_suspend NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) #define mv_platform_resume NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) static const struct of_device_id mv_sata_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 	{ .compatible = "marvell,armada-370-sata", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 	{ .compatible = "marvell,orion-sata", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) #endif
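/*
 * A minimal sketch of a matching device-tree node, using only the
 * properties this driver parses above (the address, size and IRQ
 * number are placeholders, not values from any real board):
 *
 *	sata@a0000 {
 *		compatible = "marvell,orion-sata";
 *		reg = <0xa0000 0x5000>;
 *		interrupts = <21>;
 *		nr-ports = <2>;
 *	};
 */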
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) static struct platform_driver mv_platform_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 	.probe		= mv_platform_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 	.remove		= mv_platform_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 	.suspend	= mv_platform_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 	.resume		= mv_platform_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 		.name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 		.of_match_table = of_match_ptr(mv_sata_dt_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) static int mv_pci_init_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 			   const struct pci_device_id *ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) static int mv_pci_device_resume(struct pci_dev *pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) static struct pci_driver mv_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 	.name			= DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 	.id_table		= mv_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 	.probe			= mv_pci_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 	.remove			= ata_pci_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 	.suspend		= ata_pci_device_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 	.resume			= mv_pci_device_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324)  *      mv_print_info - Dump key info to kernel log for perusal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325)  *      @host: ATA host to print info about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327)  *      FIXME: complete this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)  *
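 *      Logs a single line of the form (values illustrative):
 *        "Gen-IIE 32 slots 4 ports SCSI mode IRQ via INTx"
 *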
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) static void mv_print_info(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 	struct pci_dev *pdev = to_pci_dev(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 	struct mv_host_priv *hpriv = host->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 	u8 scc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 	const char *scc_s, *gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 	/* Read the PCI subclass byte to report whether the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 	 * presents itself as a SCSI or RAID class device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	if (scc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 		scc_s = "SCSI";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 	else if (scc == 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 		scc_s = "RAID";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 		scc_s = "?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 	if (IS_GEN_I(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 		gen = "I";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 	else if (IS_GEN_II(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 		gen = "II";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 	else if (IS_GEN_IIE(hpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 		gen = "IIE";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 		gen = "?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365)  *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366)  *      @pdev: PCI device found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367)  *      @ent: PCI device ID entry for the matched host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369)  *      LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)  *      Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) static int mv_pci_init_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 			   const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 	unsigned int board_idx = (unsigned int)ent->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 	struct ata_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 	struct mv_host_priv *hpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) 	int n_ports, port, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 	ata_print_version_once(&pdev->dev, DRV_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 	/* allocate host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 	if (!host || !hpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 	host->private_data = hpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 	hpriv->n_ports = n_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 	hpriv->board_idx = board_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 	/* acquire resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 	rc = pcim_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 	if (rc == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 		pcim_pin_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 	host->iomap = pcim_iomap_table(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 	hpriv->base = host->iomap[MV_PRIMARY_BAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 		dev_err(&pdev->dev, "DMA enable failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 	for (port = 0; port < host->n_ports; port++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 		struct ata_port *ap = host->ports[port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 		unsigned int offset = port_mmio - hpriv->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 	/* initialize adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 	rc = mv_init_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 	/* Enable message-signaled interrupts, if requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 	if (msi && pci_enable_msi(pdev) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 		hpriv->hp_flags |= MV_HP_FLAG_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 	mv_dump_pci_cfg(pdev, 0x68);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 	mv_print_info(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 	pci_try_set_mwi(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) static int mv_pci_device_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 	struct ata_host *host = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 	rc = ata_pci_device_do_resume(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 	/* initialize adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 	rc = mv_init_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) 	ata_host_resume(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 
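/*
 * Module init/exit: the PCI driver is registered first; if the platform
 * driver then fails to register, the PCI driver is unregistered again so
 * that the module load fails cleanly rather than staying half-registered.
 */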
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) static int __init mv_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 	int rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 	rc = pci_register_driver(&mv_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 	rc = platform_driver_register(&mv_platform_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 		pci_unregister_driver(&mv_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) static void __exit mv_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 	pci_unregister_driver(&mv_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 	platform_driver_unregister(&mv_platform_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) MODULE_AUTHOR("Brett Russ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) MODULE_VERSION(DRV_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) MODULE_ALIAS("platform:" DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) module_init(mv_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) module_exit(mv_exit);