// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>

#include <asm/dma.h>
#include <asm/ecard.h>

#define DRV_NAME "pata_icside"

#define ICS_IDENT_OFFSET 0x2280

#define ICS_ARCIN_V5_INTRSTAT 0x0000
#define ICS_ARCIN_V5_INTROFFSET 0x0004

#define ICS_ARCIN_V6_INTROFFSET_1 0x2200
#define ICS_ARCIN_V6_INTRSTAT_1 0x2290
#define ICS_ARCIN_V6_INTROFFSET_2 0x3200
#define ICS_ARCIN_V6_INTRSTAT_2 0x3290

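/*
 * Register layout of one IDE interface on the card: dataoffset and
 * ctrloffset are byte offsets from the mapped card base for the taskfile
 * (command block) registers and the control/altstatus register, and
 * stepping is the shift applied to the register number, i.e. successive
 * taskfile registers are spaced 1 << stepping bytes apart (see
 * pata_icside_setup_ioaddr()).
 */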
struct portinfo {
	unsigned int dataoffset;
	unsigned int ctrloffset;
	unsigned int stepping;
};

static const struct portinfo pata_icside_portinfo_v5 = {
	.dataoffset = 0x2800,
	.ctrloffset = 0x2b80,
	.stepping = 6,
};

static const struct portinfo pata_icside_portinfo_v6_1 = {
	.dataoffset = 0x2000,
	.ctrloffset = 0x2380,
	.stepping = 6,
};

static const struct portinfo pata_icside_portinfo_v6_2 = {
	.dataoffset = 0x3000,
	.ctrloffset = 0x3380,
	.stepping = 6,
};

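/*
 * Per-card driver state: irq_port and ioc_base are the mapped regions used
 * for interrupt control and the port-select latch, type records the
 * detected card variant, dma is the IOMD DMA channel (or NO_DMA), and each
 * port carries its select value, a "disabled after reset" flag and the
 * programmed DMA cycle time for each device.
 */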
struct pata_icside_state {
	void __iomem *irq_port;
	void __iomem *ioc_base;
	unsigned int type;
	unsigned int dma;
	struct {
		u8 port_sel;
		u8 disabled;
		unsigned int speed[ATA_MAX_DEVICES];
	} port[2];
};

struct pata_icside_info {
	struct pata_icside_state *state;
	struct expansion_card *ec;
	void __iomem *base;
	void __iomem *irqaddr;
	unsigned int irqmask;
	const expansioncard_ops_t *irqops;
	unsigned int mwdma_mask;
	unsigned int nr_ports;
	const struct portinfo *port[2];
	unsigned long raw_base;
	unsigned long raw_ioc_base;
};

#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)
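/*
 * The interface type is assembled from four ID bits read from the card's
 * IOC fast space in pata_icside_probe().
 */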

/* ---------------- Version 5 PCB Support Functions --------------------- */
/* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose : enable interrupts from card
 */
static void pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct pata_icside_state *state = ec->irq_data;

	writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

/* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose : disable interrupts from card
 */
static void pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct pata_icside_state *state = ec->irq_data;

	readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}

static const expansioncard_ops_t pata_icside_ops_arcin_v5 = {
	.irqenable = pata_icside_irqenable_arcin_v5,
	.irqdisable = pata_icside_irqdisable_arcin_v5,
};


/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose : enable interrupts from card
 */
static void pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct pata_icside_state *state = ec->irq_data;
	void __iomem *base = state->irq_port;

	if (!state->port[0].disabled)
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
	if (!state->port[1].disabled)
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
}

/* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose : disable interrupts from card
 */
static void pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct pata_icside_state *state = ec->irq_data;

	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}

/* Prototype: pata_icside_irqpending_arcin_v6 (struct expansion_card *ec)
 * Purpose : detect an active interrupt from card
 */
static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
{
	struct pata_icside_state *state = ec->irq_data;

	return (readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1) ||
	       (readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1);
}

static const expansioncard_ops_t pata_icside_ops_arcin_v6 = {
	.irqenable = pata_icside_irqenable_arcin_v6,
	.irqdisable = pata_icside_irqdisable_arcin_v6,
	.irqpending = pata_icside_irqpending_arcin_v6,
};


/*
 * SG-DMA support.
 *
 * Similar to BM-DMA, but we use the RiscPC's IOMD DMA controllers.
 * There is only one DMA controller per card, which means that only
 * one drive can be accessed at one time. NOTE! We do not enforce that
 * here, but we rely on the main IDE driver spotting that both
 * interfaces use the same IRQ, which should guarantee this.
 */

/*
 * Configure the IOMD to give the appropriate timings for the transfer
 * mode being requested. We take the advice of the ATA standards, and
 * calculate the cycle time based on the transfer mode, and the EIDE
 * MW DMA specs that the drive provides in the IDENTIFY command.
 *
 * We have the following IOMD DMA modes to choose from:
 *
 *	Type	Active		Recovery	Cycle
 *	A	250 (250)	312 (550)	562 (800)
 *	B	187 (200)	250 (550)	437 (750)
 *	C	125 (125)	125 (375)	250 (500)
 *	D	62  (50)	125 (375)	187 (425)
 *
 * (figures in brackets are actual measured timings on DIOR/DIOW)
 *
 * However, we also need to take care of the read/write active and
 * recovery timings:
 *
 *		Read		Write
 *	Mode	Active -- Recovery --	Cycle	IOMD type
 *	MW0	215	50	215	480	A
 *	MW1	80	50	50	150	C
 *	MW2	70	25	25	120	C
 */
static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pata_icside_state *state = ap->host->private_data;
	struct ata_timing t;
	unsigned int cycle;
	char iomd_type;

	/*
	 * DMA is based on a 16MHz clock
	 */
	if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1))
		return;
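	/*
	 * t now holds the required active, recovery and cycle times for
	 * the selected MW DMA mode, merged with the EIDE timings the
	 * drive reported, in nanoseconds.
	 */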

	/*
	 * Choose the IOMD cycle timing which ensures that the interface
	 * satisfies the measured active, recovery and cycle times.
	 */
	if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425)
		iomd_type = 'D', cycle = 187;
	else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500)
		iomd_type = 'C', cycle = 250;
	else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750)
		iomd_type = 'B', cycle = 437;
	else
		iomd_type = 'A', cycle = 562;

	ata_dev_info(adev, "timings: act %dns rec %dns cyc %dns (%c)\n",
		     t.active, t.recover, t.cycle, iomd_type);

	state->port[ap->port_no].speed[adev->devno] = cycle;
}
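
/*
 * Worked example: the nominal MW DMA2 timings (70ns active, 25ns recovery,
 * 120ns cycle) miss the 50ns active limit for type 'D' but satisfy the
 * type 'C' limits above, so the interface is programmed with a 250ns
 * cycle, matching the MW2 -> C entry in the table before
 * pata_icside_set_dmamode().
 */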

static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_icside_state *state = ap->host->private_data;
	unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;

	/*
	 * We are simplex; BUG if we try to fiddle with DMA
	 * while it's active.
	 */
	BUG_ON(dma_channel_active(state->dma));

	/*
	 * Route the DMA signals to the correct interface
	 */
	writeb(state->port[ap->port_no].port_sel, state->ioc_base);

	set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
	set_dma_sg(state->dma, qc->sg, qc->n_elem);
	set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);
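	/*
	 * The channel is now fully programmed but not yet enabled;
	 * pata_icside_bmdma_start() enables it after the taskfile
	 * command below has been issued.
	 */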

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

static void pata_icside_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_icside_state *state = ap->host->private_data;

	BUG_ON(dma_channel_active(state->dma));
	enable_dma(state->dma);
}

static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_icside_state *state = ap->host->private_data;

	disable_dma(state->dma);

	/* see ata_bmdma_stop */
	ata_sff_dma_pause(ap);
}

static u8 pata_icside_bmdma_status(struct ata_port *ap)
{
	struct pata_icside_state *state = ap->host->private_data;
	void __iomem *irq_port;

	irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 :
						    ICS_ARCIN_V6_INTRSTAT_1);

	return readb(irq_port) & 1 ? ATA_DMA_INTR : 0;
}

static int icside_dma_init(struct pata_icside_info *info)
{
	struct pata_icside_state *state = info->state;
	struct expansion_card *ec = info->ec;
	int i;

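	/*
	 * Default every device to a 480ns DMA cycle;
	 * pata_icside_set_dmamode() programs the real value once the
	 * transfer mode has been selected.
	 */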
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		state->port[0].speed[i] = 480;
		state->port[1].speed[i] = 480;
	}

	if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
		state->dma = ec->dma;
		info->mwdma_mask = ATA_MWDMA2;
	}

	return 0;
}


static struct scsi_host_template pata_icside_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = SG_MAX_SEGMENTS,
	.dma_boundary = IOMD_DMA_BOUNDARY,
};

static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	struct pata_icside_state *state = ap->host->private_data;

	if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE)
		return ata_sff_postreset(link, classes);

	state->port[ap->port_no].disabled = 1;

	if (state->type == ICS_TYPE_V6) {
		/*
		 * Disable interrupts from this port, otherwise we
		 * receive spurious interrupts from the floating
		 * interrupt line.
		 */
		void __iomem *irq_port = state->irq_port +
				(ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 :
					       ICS_ARCIN_V6_INTROFFSET_1);
		readb(irq_port);
	}
}

static struct ata_port_operations pata_icside_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	/* no need to build any PRD tables for DMA */
	.qc_prep		= ata_noop_qc_prep,
	.sff_data_xfer		= ata_sff_data_xfer32,
	.bmdma_setup		= pata_icside_bmdma_setup,
	.bmdma_start		= pata_icside_bmdma_start,
	.bmdma_stop		= pata_icside_bmdma_stop,
	.bmdma_status		= pata_icside_bmdma_status,

	.cable_detect		= ata_cable_40wire,
	.set_dmamode		= pata_icside_set_dmamode,
	.postreset		= pata_icside_postreset,

	.port_start		= ATA_OP_NULL,	/* don't need PRD table */
};

static void pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
				     struct pata_icside_info *info,
				     const struct portinfo *port)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	void __iomem *cmd = base + port->dataoffset;

	ioaddr->cmd_addr = cmd;
	ioaddr->data_addr = cmd + (ATA_REG_DATA << port->stepping);
	ioaddr->error_addr = cmd + (ATA_REG_ERR << port->stepping);
	ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << port->stepping);
	ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << port->stepping);
	ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << port->stepping);
	ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << port->stepping);
	ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << port->stepping);
	ioaddr->device_addr = cmd + (ATA_REG_DEVICE << port->stepping);
	ioaddr->status_addr = cmd + (ATA_REG_STATUS << port->stepping);
	ioaddr->command_addr = cmd + (ATA_REG_CMD << port->stepping);

	ioaddr->ctl_addr = base + port->ctrloffset;
	ioaddr->altstatus_addr = ioaddr->ctl_addr;

	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
		      info->raw_base + port->dataoffset,
		      info->raw_base + port->ctrloffset);

	if (info->raw_ioc_base)
		ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
}

static int pata_icside_register_v5(struct pata_icside_info *info)
{
	struct pata_icside_state *state = info->state;
	void __iomem *base;

	base = ecardm_iomap(info->ec, ECARD_RES_MEMC, 0, 0);
	if (!base)
		return -ENOMEM;

	state->irq_port = base;

	info->base = base;
	info->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
	info->irqmask = 1;
	info->irqops = &pata_icside_ops_arcin_v5;
	info->nr_ports = 1;
	info->port[0] = &pata_icside_portinfo_v5;

	info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC);

	return 0;
}

static int pata_icside_register_v6(struct pata_icside_info *info)
{
	struct pata_icside_state *state = info->state;
	struct expansion_card *ec = info->ec;
	void __iomem *ioc_base, *easi_base;
	unsigned int sel = 0;

	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!ioc_base)
		return -ENOMEM;

	easi_base = ioc_base;

	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
		easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
		if (!easi_base)
			return -ENOMEM;

		/*
		 * Enable access to the EASI region.
		 */
		sel = 1 << 5;
	}

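	/*
	 * Latch the region select. Bit 5 selects EASI access; bit 0 of the
	 * value later written here (via port_sel) selects which interface
	 * the DMA signals are routed to (see pata_icside_bmdma_setup()).
	 */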
	writeb(sel, ioc_base);

	state->irq_port = easi_base;
	state->ioc_base = ioc_base;
	state->port[0].port_sel = sel;
	state->port[1].port_sel = sel | 1;

	info->base = easi_base;
	info->irqops = &pata_icside_ops_arcin_v6;
	info->nr_ports = 2;
	info->port[0] = &pata_icside_portinfo_v6_1;
	info->port[1] = &pata_icside_portinfo_v6_2;

	info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI);
	info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST);

	return icside_dma_init(info);
}

static int pata_icside_add_ports(struct pata_icside_info *info)
{
	struct expansion_card *ec = info->ec;
	struct ata_host *host;
	int i;

	if (info->irqaddr) {
		ec->irqaddr = info->irqaddr;
		ec->irqmask = info->irqmask;
	}
	if (info->irqops)
		ecard_setirq(ec, info->irqops, info->state);

	/*
	 * Be on the safe side - disable interrupts
	 */
	ec->ops->irqdisable(ec, ec->irq);

	host = ata_host_alloc(&ec->dev, info->nr_ports);
	if (!host)
		return -ENOMEM;

	host->private_data = info->state;
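	/*
	 * Only one DMA channel serves both interfaces, so mark the host
	 * simplex to keep libata from running DMA on both ports at once.
	 */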
	host->flags = ATA_HOST_SIMPLEX;

	for (i = 0; i < info->nr_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ap->pio_mask = ATA_PIO4;
		ap->mwdma_mask = info->mwdma_mask;
		ap->flags |= ATA_FLAG_SLAVE_POSS;
		ap->ops = &pata_icside_port_ops;

		pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
	}

	return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
				 &pata_icside_sht);
}

static int pata_icside_probe(struct expansion_card *ec,
			     const struct ecard_id *id)
{
	struct pata_icside_state *state;
	struct pata_icside_info info;
	void __iomem *idmem;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	state = devm_kzalloc(&ec->dev, sizeof(*state), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto release;
	}

	state->type = ICS_TYPE_NOTYPE;
	state->dma = NO_DMA;

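	/*
	 * The board type is encoded in four ID bits, one per word,
	 * starting at ICS_IDENT_OFFSET in the IOC fast space; assemble
	 * them LSB first.
	 */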
	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (idmem) {
		unsigned int type;

		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
		ecardm_iounmap(ec, idmem);

		state->type = type;
	}

	memset(&info, 0, sizeof(info));
	info.state = state;
	info.ec = ec;

	switch (state->type) {
	case ICS_TYPE_A3IN:
		dev_warn(&ec->dev, "A3IN unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_A3USER:
		dev_warn(&ec->dev, "A3USER unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_V5:
		ret = pata_icside_register_v5(&info);
		break;

	case ICS_TYPE_V6:
		ret = pata_icside_register_v6(&info);
		break;

	default:
		dev_warn(&ec->dev, "unknown interface type\n");
		ret = -ENODEV;
		break;
	}

	if (ret == 0)
		ret = pata_icside_add_ports(&info);

	if (ret == 0)
		goto out;

 release:
	ecard_release_resources(ec);
 out:
	return ret;
}

static void pata_icside_shutdown(struct expansion_card *ec)
{
	struct ata_host *host = ecard_get_drvdata(ec);
	unsigned long flags;

	/*
	 * Disable interrupts from this card. We need to do
	 * this before disabling EASI since we may be accessing
	 * this register via that region.
	 */
	local_irq_save(flags);
	ec->ops->irqdisable(ec, ec->irq);
	local_irq_restore(flags);

	/*
	 * Reset the ROM pointer so that we can read the ROM
	 * after a soft reboot. This also disables access to
	 * the IDE taskfile via the EASI region.
	 */
	if (host) {
		struct pata_icside_state *state = host->private_data;
		if (state->ioc_base)
			writeb(0, state->ioc_base);
	}
}

static void pata_icside_remove(struct expansion_card *ec)
{
	struct ata_host *host = ecard_get_drvdata(ec);
	struct pata_icside_state *state = host->private_data;

	ata_host_detach(host);

	pata_icside_shutdown(ec);

	/*
	 * don't NULL out the drvdata - devres/libata wants it
	 * to free the ata_host structure.
	 */
	if (state->dma != NO_DMA)
		free_dma(state->dma);

	ecard_release_resources(ec);
}

static const struct ecard_id pata_icside_ids[] = {
	{ MANU_ICS,  PROD_ICS_IDE },
	{ MANU_ICS2, PROD_ICS2_IDE },
	{ 0xffff, 0xffff }
};

static struct ecard_driver pata_icside_driver = {
	.probe		= pata_icside_probe,
	.remove		= pata_icside_remove,
	.shutdown	= pata_icside_shutdown,
	.id_table	= pata_icside_ids,
	.drv = {
		.name	= DRV_NAME,
	},
};

static int __init pata_icside_init(void)
{
	return ecard_register_driver(&pata_icside_driver);
}

static void __exit pata_icside_exit(void)
{
	ecard_remove_driver(&pata_icside_driver);
}

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS PATA driver");

module_init(pata_icside_init);
module_exit(pata_icside_exit);