Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

sun_esp.c, as of commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0-only
/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))
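
/*
 * Note: dma_read32()/dma_write32() expand to accesses through the local
 * "esp" pointer, so they can only be used inside functions that have a
 * struct esp *esp in scope.
 */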

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

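/*
 * Map the DVMA controller's register window (the first resource of the
 * "espdma"/"dma" node) and work out which gate array revision we are
 * talking to from the DMA_DEVICE_ID field of DMA_CSR.
 */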
static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}

static int esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &op->resource[1];
	else
		res = &op->resource[0];

	esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

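/*
 * Allocate the small (16 byte) DMA-coherent command block that the
 * esp_scsi core expects every front end to provide; its bus address is
 * kept in command_block_dma.
 */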
static int esp_sbus_map_command_block(struct esp *esp)
{
	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

static int esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct platform_device *op = to_platform_device(esp->dev);

	host->irq = op->archdata.irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

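/*
 * Determine our initiator (host adapter) SCSI ID: try the ESP node's
 * "initiator-id" and "scsi-initiator-id" properties, then the same
 * property on the DMA node, and finally fall back to the conventional
 * ID of 7.
 */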
static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;

	dp = op->dev.of_node;
	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void esp_get_differential(struct esp *esp)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;

	dp = op->dev.of_node;
	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

static void esp_get_clock_params(struct esp *esp)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *bus_dp, *dp;
	int fmhz;

	dp = op->dev.of_node;
	bus_dp = dp->parent;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

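/*
 * The usable burst sizes are the intersection of the "burst-sizes"
 * properties on the ESP node, the DMA node, and the DMA node's parent
 * bus. If no property was found, or 16- and 32-byte bursts are not both
 * advertised, fall back to DMA_BURST32 - 1 (every burst size below 32).
 */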
static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
	struct device_node *dma_dp = dma_of->dev.of_node;
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;
	u8 bursts, val;

	dp = op->dev.of_node;
	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

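/*
 * ESP registers are byte wide but sit on 32-bit boundaries on SBUS,
 * hence the "reg * 4UL" scaling in the register accessors below.
 */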
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

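/*
 * Bring the DVMA gate array back into a known state after an ESP reset.
 * The handling depends on the controller revision: HME (FAS366) gets a
 * DMA_RESET_FAS366/DMA_RST_SCSI sequence and a freshly built CSR, rev2
 * enables 3-clock mode (except on ESP100), rev3 switches to 2-clock mode,
 * and ESC1 sets DMA_ADD_ENABLE while clearing DMA_BCNT_ENAB.
 */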
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op = to_platform_device(esp->dev);
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

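/*
 * Wait for the DVMA FIFO to finish draining to memory; nothing to do on
 * HME. On controllers other than rev3 and ESC1 the drain has to be
 * kicked off explicitly via DMA_FIFO_STDRAIN before polling
 * DMA_FIFO_ISDRAIN.
 */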
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

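/*
 * Throw away any state left in the DVMA engine between commands. On HME
 * this is a DMA_RST_SCSI pulse plus a rebuilt CSR (and a DMA_ADDR clear
 * to keep the SCSI channel engine from locking up); other revisions wait
 * for pending reads to finish and then pulse DMA_FIFO_INV.
 */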
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

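/*
 * Kick off a DMA transfer: load the ESP transfer counter (FAS_RLO/FAS_RHI
 * carry the upper count bits on FASHME), then program the DVMA
 * address/count/CSR. On HME the ESP command is issued before the DVMA
 * engine is armed; on the older controllers the DVMA is set up first and
 * the ESP command issued last. For ESC1 the programmed DMA count is
 * rounded up (PAGE_ALIGN of addr + count + 16) rather than being the
 * exact byte count.
 */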
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};

static int esp_sbus_probe_one(struct platform_device *op,
			      struct platform_device *espdma, int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = &op->dev;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&op->dev, esp);

	err = scsi_esp_register(esp);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

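/*
 * Probe entry point. Figure out where the DVMA controller lives: a
 * classic ESP sits under an "espdma"/"dma" node that carries the DMA
 * registers, while the HME "SUNW,fas" variant has both register sets in
 * its own node (see esp_sbus_map_regs()).
 */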
static int esp_sbus_probe(struct platform_device *op)
{
	struct device_node *dma_node = NULL;
	struct device_node *dp = op->dev.of_node;
	struct platform_device *dma_of = NULL;
	int hme = 0;
	int ret;

	if (of_node_name_eq(dp->parent, "espdma") ||
	    of_node_name_eq(dp->parent, "dma"))
		dma_node = dp->parent;
	else if (of_node_name_eq(dp, "SUNW,fas")) {
		dma_node = op->dev.of_node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	ret = esp_sbus_probe_one(op, dma_of, hme);
	if (ret)
		put_device(&dma_of->dev);

	return ret;
}

static int esp_sbus_remove(struct platform_device *op)
{
	struct esp *esp = dev_get_drvdata(&op->dev);
	struct platform_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	bool is_hme;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	is_hme = (esp->dmarev == dvmahme);

	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	dev_set_drvdata(&op->dev, NULL);

	put_device(&dma_of->dev);

	return 0;
}

static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= esp_sbus_remove,
};
module_platform_driver(esp_sbus_driver);

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);