// SPDX-License-Identifier: GPL-2.0
/*
 * ESP front-end for Amiga ZORRO SCSI systems.
 *
 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
 *
 * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
 * migration to ESP SCSI core
 *
 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
 * Blizzard 1230 DMA and probe function fixes
 */
/*
 * ZORRO bus code from:
 */
/*
 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
 * Amiga MacroSystemUS WarpEngine SCSI controller.
 * Amiga Technologies/DKB A4091 SCSI controller.
 *
 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
 * plus modifications of the 53c7xx.c driver to support the Amiga.
 *
 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/zorro.h>
#include <linux/slab.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

#include "esp_scsi.h"

MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
MODULE_DESCRIPTION("Amiga Zorro NCR53C9x (ESP) driver");
MODULE_LICENSE("GPL");

/* per-board register layout definitions */

/* Blizzard 1230 DMA interface */

struct blz1230_dma_registers {
	unsigned char dma_addr;		/* DMA address [0x0000] */
	unsigned char dmapad2[0x7fff];
	unsigned char dma_latch;	/* DMA latch [0x8000] */
};
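
/*
 * Note: zorro_esp_send_blz1230_dma_cmd() below first writes the DMA
 * address MSB to dma_latch and then shifts all four address bytes, MSB
 * first, through dma_addr - presumably a requirement of the Blizzard
 * 1230 SCSI-IV DMA engine.
 */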

/* Blizzard 1230II DMA interface */

struct blz1230II_dma_registers {
	unsigned char dma_addr;		/* DMA address [0x0000] */
	unsigned char dmapad2[0xf];
	unsigned char dma_latch;	/* DMA latch [0x0010] */
};

/* Blizzard 2060 DMA interface */

struct blz2060_dma_registers {
	unsigned char dma_led_ctrl;	/* DMA led control [0x000] */
	unsigned char dmapad1[0x0f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x010] */
	unsigned char dmapad2[0x03];
	unsigned char dma_addr1;	/* DMA address [0x014] */
	unsigned char dmapad3[0x03];
	unsigned char dma_addr2;	/* DMA address [0x018] */
	unsigned char dmapad4[0x03];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x01c] */
};

/* DMA control bits */
#define DMA_WRITE 0x80000000

/* Cyberstorm DMA interface */

struct cyber_dma_registers {
	unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
	unsigned char dmapad1[1];
	unsigned char dma_addr1;	/* DMA address [0x002] */
	unsigned char dmapad2[1];
	unsigned char dma_addr2;	/* DMA address [0x004] */
	unsigned char dmapad3[1];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
	unsigned char dmapad4[0x3fb];
	unsigned char cond_reg;		/* DMA cond (ro) [0x402] */
#define ctrl_reg cond_reg		/* DMA control (wo) [0x402] */
};

/* DMA control bits */
#define CYBER_DMA_WRITE 0x40	/* DMA direction. 1 = write */
#define CYBER_DMA_Z3 0x20	/* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */

/* DMA status bits */
#define CYBER_DMA_HNDL_INTR 0x80	/* DMA IRQ pending? */

/* The CyberStorm II DMA interface */
struct cyberII_dma_registers {
	unsigned char cond_reg;		/* DMA cond (ro) [0x000] */
#define ctrl_reg cond_reg		/* DMA control (wo) [0x000] */
	unsigned char dmapad4[0x3f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x040] */
	unsigned char dmapad1[3];
	unsigned char dma_addr1;	/* DMA address [0x044] */
	unsigned char dmapad2[3];
	unsigned char dma_addr2;	/* DMA address [0x048] */
	unsigned char dmapad3[3];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x04c] */
};

/* Fastlane DMA interface */

struct fastlane_dma_registers {
	unsigned char cond_reg;		/* DMA status (ro) [0x0000] */
#define ctrl_reg cond_reg		/* DMA control (wo) [0x0000] */
	char dmapad1[0x3f];
	unsigned char clear_strobe;	/* DMA clear (wo) [0x0040] */
};

/*
 * The controller registers can be found in the Z2 config area at these
 * offsets:
 */
#define FASTLANE_ESP_ADDR 0x1000001

/* DMA status bits */
#define FASTLANE_DMA_MINT 0x80
#define FASTLANE_DMA_IACT 0x40
#define FASTLANE_DMA_CREQ 0x20

/* DMA control bits */
#define FASTLANE_DMA_FCODE 0xa0
#define FASTLANE_DMA_MASK 0xf3
#define FASTLANE_DMA_WRITE 0x08	/* 1 = write */
#define FASTLANE_DMA_ENABLE 0x04	/* Enable DMA */
#define FASTLANE_DMA_EDI 0x02	/* Enable DMA IRQ ? */
#define FASTLANE_DMA_ESI 0x01	/* Enable SCSI IRQ */

/*
 * Private data used by this driver
 */
struct zorro_esp_priv {
	struct esp *esp;		/* our ESP instance - for Scsi_host* */
	void __iomem *board_base;	/* virtual address (Zorro III board) */
	int zorro3;			/* board is Zorro III */
	unsigned char ctrl_data;	/* shadow copy of ctrl_reg */
};

/*
 * On all implementations except for the Oktagon, padding between ESP
 * registers is three bytes.
 * On Oktagon, it is one byte - use a different accessor there.
 *
 * Oktagon needs PDMA - currently unsupported!
 */

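/*
 * Each ESP register therefore sits at a 4-byte stride from the previous
 * one. For example (going by the register indices in esp_scsi.h), the
 * FIFO data register ESP_FDATA ends up at esp->regs + ESP_FDATA * 4,
 * which is also how esp->fifo_reg is set up in zorro_esp_probe().
 */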
static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	writeb(val, esp->regs + (reg * 4UL));
}

static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
{
	return readb(esp->regs + (reg * 4UL));
}

static int zorro_esp_irq_pending(struct esp *esp)
{
	/* check ESP status register; DMA has no status reg. */
	if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
		return 1;

	return 0;
}

static int cyber_esp_irq_pending(struct esp *esp)
{
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status = readb(&dregs->cond_reg);

	/* It's important to check the DMA IRQ bit in the correct way! */
	return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
		(dma_status & CYBER_DMA_HNDL_INTR));
}

static int fastlane_esp_irq_pending(struct esp *esp)
{
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status;

	dma_status = readb(&dregs->cond_reg);

	if (dma_status & FASTLANE_DMA_IACT)
		return 0;	/* not our IRQ */

	/* Return non-zero if ESP requested IRQ */
	return (
	    (dma_status & FASTLANE_DMA_CREQ) &&
	    (!(dma_status & FASTLANE_DMA_MINT)) &&
	    (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
}

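/*
 * The generic limit below matches the ESP's 16-bit transfer counter
 * (programmed via ESP_TCLOW/ESP_TCMED in the send_dma_cmd routines
 * further down), so a single DMA transfer is capped at 64 KiB.
 */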
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	return dma_len > (1U << 16) ? (1U << 16) : dma_len;
}

static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	/* The old driver used 0xfffc as limit, so do that here too */
	return dma_len > 0xfffc ? 0xfffc : dma_len;
}

static void zorro_esp_reset_dma(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_drain(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_invalidate(struct esp *esp)
{
	/* nothing to do here */
}

static void fastlane_esp_dma_invalidate(struct esp *esp)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char *ctrl_data = &zep->ctrl_data;

	*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
	writeb(0, &dregs->clear_strobe);
	z_writel(0, zep->board_base);
}

/* Blizzard 1230/60 SCSI-IV DMA */

static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/*
	 * Use PIO if transferring message bytes to esp->command_block_dma.
	 * PIO requires a virtual address, so substitute esp->command_block
	 * for addr.
	 */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

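	/*
	 * The Blizzard DMA engine (here and in the 1230-II and 2060
	 * variants below) is programmed with the physical address shifted
	 * right by one; the top bit (DMA_WRITE) encodes the direction and
	 * is cleared for a DMA receive and set for a DMA send.
	 */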
	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 1230-II DMA */

static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 2060 DMA */

static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb(addr & 0xff, &dregs->dma_addr3);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm I DMA */

static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

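	/*
	 * On the Cyberstorm the transfer direction is encoded twice: in
	 * bit 0 of the DMA address programmed below (cleared for receive,
	 * set for send) and in the CYBER_DMA_WRITE bit of the control
	 * register shadow.
	 */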
	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	if (write)
		*ctrl_data &= ~(CYBER_DMA_WRITE);
	else
		*ctrl_data |= CYBER_DMA_WRITE;

	*ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm II DMA */

static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

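	/*
	 * Unlike the Cyberstorm I routine above, this one never touches
	 * the DMA control register; the transfer direction is carried
	 * solely in bit 0 of the DMA address written below.
	 */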
	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	scsi_esp_cmd(esp, cmd);
}

/* Fastlane DMA */

static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

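	/*
	 * The Fastlane appears to pick up the DMA address from a write
	 * into its Zorro III board window: after pulsing clear_strobe,
	 * the direction-tagged address is written as data at offset
	 * (addr & 0x00ffffff) within zep->board_base, which was mapped
	 * in zorro_esp_probe().
	 */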
	writeb(0, &dregs->clear_strobe);
	z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));

	if (write) {
		*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
			FASTLANE_DMA_ENABLE;
	} else {
		*ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
			FASTLANE_DMA_ENABLE |
			FASTLANE_DMA_WRITE);
	}

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

static int zorro_esp_dma_error(struct esp *esp)
{
	return esp->send_cmd_error;
}

/* per-board ESP driver ops */

static const struct esp_driver_ops blz1230_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops blz1230II_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230II_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops blz2060_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz2060_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops cyber_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= cyber_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyber_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops cyberII_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyberII_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops fastlane_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= fastlane_esp_irq_pending,
	.dma_length_limit	= fastlane_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= fastlane_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_fastlane_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

/* Zorro driver config data */

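/*
 * Note on the fields below: 'offset' and 'dma_offset' are added to the
 * board's base address unless 'absolute' is set, in which case they are
 * used as absolute addresses (see zorro_esp_probe()). 'scsi_option' marks
 * boards where the SCSI part is an option that may be absent; the probe
 * verifies the ESP chip by writing and reading back ESP_CFG1 before
 * proceeding.
 */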
struct zorro_driver_data {
	const char *name;
	unsigned long offset;
	unsigned long dma_offset;
	int absolute;	/* offset is absolute address */
	int scsi_option;
	const struct esp_driver_ops *esp_ops;
};

/* board types */

enum {
	ZORRO_BLZ1230,
	ZORRO_BLZ1230II,
	ZORRO_BLZ2060,
	ZORRO_CYBER,
	ZORRO_CYBERII,
	ZORRO_FASTLANE,
};

/* per-board config data */

static const struct zorro_driver_data zorro_esp_boards[] = {
	[ZORRO_BLZ1230] = {
		.name = "Blizzard 1230",
		.offset = 0x8000,
		.dma_offset = 0x10000,
		.scsi_option = 1,
		.esp_ops = &blz1230_esp_ops,
	},
	[ZORRO_BLZ1230II] = {
		.name = "Blizzard 1230II",
		.offset = 0x10000,
		.dma_offset = 0x10021,
		.scsi_option = 1,
		.esp_ops = &blz1230II_esp_ops,
	},
	[ZORRO_BLZ2060] = {
		.name = "Blizzard 2060",
		.offset = 0x1ff00,
		.dma_offset = 0x1ffe0,
		.esp_ops = &blz2060_esp_ops,
	},
	[ZORRO_CYBER] = {
		.name = "CyberStormI",
		.offset = 0xf400,
		.dma_offset = 0xf800,
		.esp_ops = &cyber_esp_ops,
	},
	[ZORRO_CYBERII] = {
		.name = "CyberStormII",
		.offset = 0x1ff03,
		.dma_offset = 0x1ff43,
		.scsi_option = 1,
		.esp_ops = &cyberII_esp_ops,
	},
	[ZORRO_FASTLANE] = {
		.name = "Fastlane",
		.offset = 0x1000001,
		.dma_offset = 0x1000041,
		.esp_ops = &fastlane_esp_ops,
	},
};

static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
	{	/* Blizzard 1230 IV */
		.id = ZORRO_ID(PHASE5, 0x11, 0),
		.driver_data = ZORRO_BLZ1230,
	},
	{	/* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
		.id = ZORRO_ID(PHASE5, 0x0B, 0),
		.driver_data = ZORRO_BLZ1230II,
	},
	{	/* Blizzard 2060 */
		.id = ZORRO_ID(PHASE5, 0x18, 0),
		.driver_data = ZORRO_BLZ2060,
	},
	{	/* Cyberstorm */
		.id = ZORRO_ID(PHASE5, 0x0C, 0),
		.driver_data = ZORRO_CYBER,
	},
	{	/* Cyberstorm II */
		.id = ZORRO_ID(PHASE5, 0x19, 0),
		.driver_data = ZORRO_CYBERII,
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);

static int zorro_esp_probe(struct zorro_dev *z,
			   const struct zorro_device_id *ent)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	const struct zorro_driver_data *zdd;
	struct zorro_esp_priv *zep;
	unsigned long board, ioaddr, dmaaddr;
	int err;

	board = zorro_resource_start(z);
	zdd = &zorro_esp_boards[ent->driver_data];

	pr_info("%s found at address 0x%lx.\n", zdd->name, board);

	zep = kzalloc(sizeof(*zep), GFP_KERNEL);
	if (!zep) {
		pr_err("Can't allocate device private data!\n");
		return -ENOMEM;
	}

	/* let's figure out whether we have a Zorro II or Zorro III board */
	if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
		if (board > 0xffffff)
			zep->zorro3 = 1;
	} else {
		/*
		 * Even though most of these boards identify as Zorro II,
		 * they are in fact CPU expansion slot boards and have full
		 * access to all of memory. Fix up DMA bitmask here.
		 */
		z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	/*
	 * If Zorro III and ID matches Fastlane, our device table entry
	 * contains data for the Blizzard 1230 II board which does share the
	 * same ID. Fix up device table entry here.
	 * TODO: Some Cyberstorm060 boards also share this ID but would need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * to use the Cyberstorm I driver data ... we catch this by checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * for presence of ESP chip later, but don't try to fix up yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) zdd->name, board);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) zdd = &zorro_esp_boards[ZORRO_FASTLANE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (zdd->absolute) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) ioaddr = zdd->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) dmaaddr = zdd->dma_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) ioaddr = board + zdd->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) dmaaddr = board + zdd->dma_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (!zorro_request_device(z, zdd->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) pr_err("cannot reserve region 0x%lx, abort\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) board);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) goto fail_free_zep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) host = scsi_host_alloc(tpnt, sizeof(struct esp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) pr_err("No host detected; board configuration problem?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) goto fail_release_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) host->base = ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) host->this_id = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) esp = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) esp->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) esp->dev = &z->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) esp->scsi_id = host->this_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) esp->scsi_id_mask = (1 << esp->scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
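/* All boards handled by this driver clock the ESP at 40 MHz */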
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) esp->cfreq = 40000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) zep->esp = esp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
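/* Stash the private data where zorro_esp_remove() can retrieve it */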
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) dev_set_drvdata(esp->dev, zep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* additional setup required for Fastlane */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /* map full address space up to ESP base for DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) zep->board_base = ioremap(board, FASTLANE_ESP_ADDR - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (!zep->board_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) pr_err("Cannot allocate board address space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) goto fail_free_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /* initialize DMA control shadow register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) zep->ctrl_data = (FASTLANE_DMA_FCODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) esp->ops = zdd->esp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
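/*
 * Zorro III boards sit above the 24-bit Zorro II address space and
 * need an explicit mapping of the ESP register window.
 */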
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (ioaddr > 0xffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) esp->regs = ioremap(ioaddr, 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* Zorro II address space is mapped non-cacheable by early startup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) esp->regs = ZTWO_VADDR(ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (!esp->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) goto fail_unmap_fastlane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
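/* The ESP registers are long-word (4-byte) spaced on these boards */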
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) esp->fifo_reg = esp->regs + ESP_FDATA * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* Check whether a Blizzard 12x0 or Cyberstorm II really has SCSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (zdd->scsi_option) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) goto fail_unmap_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (zep->zorro3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * Only the Fastlane Z3 for now - add a switch to pick the correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * struct dma_registers size if more Zorro III boards are added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) esp->dma_regs = ioremap(dmaaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) sizeof(struct fastlane_dma_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* Zorro II address space is mapped non-cacheable by early startup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) esp->dma_regs = ZTWO_VADDR(dmaaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (!esp->dma_regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) goto fail_unmap_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
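/* Allocate the 16-byte DMA-coherent command block used by the ESP core */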
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) esp->command_block = dma_alloc_coherent(esp->dev, 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) &esp->command_block_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!esp->command_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) goto fail_unmap_dma_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
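/* These boards interrupt via the shared Amiga PORTS interrupt line */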
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) host->irq = IRQ_AMIGA_PORTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) "Amiga Zorro ESP", esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) goto fail_free_command_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* register the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) err = scsi_esp_register(esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) goto fail_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
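/* Error unwind: undo the probe steps in reverse order */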
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) fail_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) free_irq(host->irq, esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) fail_free_command_block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) dma_free_coherent(esp->dev, 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) esp->command_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) esp->command_block_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) fail_unmap_dma_regs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (zep->zorro3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) iounmap(esp->dma_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) fail_unmap_regs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (ioaddr > 0xffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) iounmap(esp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) fail_unmap_fastlane:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (zep->zorro3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) iounmap(zep->board_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) fail_free_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) fail_release_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) zorro_release_device(z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) fail_free_zep:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) kfree(zep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
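/* Tear down a host set up by zorro_esp_probe() */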
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static void zorro_esp_remove(struct zorro_dev *z)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct esp *esp = zep->esp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct Scsi_Host *host = esp->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) scsi_esp_unregister(esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) free_irq(host->irq, esp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) dma_free_coherent(esp->dev, 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) esp->command_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) esp->command_block_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (zep->zorro3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) iounmap(zep->board_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) iounmap(esp->dma_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (host->base > 0xffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) iounmap(esp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) zorro_release_device(z);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) kfree(zep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static struct zorro_driver zorro_esp_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) .name = KBUILD_MODNAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) .id_table = zorro_esp_zorro_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) .probe = zorro_esp_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) .remove = zorro_esp_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static int __init zorro_esp_scsi_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return zorro_register_driver(&zorro_esp_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static void __exit zorro_esp_scsi_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) zorro_unregister_driver(&zorro_esp_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) module_init(zorro_esp_scsi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) module_exit(zorro_esp_scsi_exit);