^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Support for IDE interfaces on PowerMacs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * These IDE interfaces are memory-mapped and have a DBDMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * for doing DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Some code taken from drivers/ide/ide-dma.c:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Copyright (c) 1995-1998 Mark Lord
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * TODO: - Use pre-calculated (kauai) timing tables all the time and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * get rid of the "rounded" tables used previously, so we have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * same table format for all controllers and can then just have one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * big table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/ide.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/adb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/pmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <asm/prom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/dbdma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <asm/ide.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <asm/machdep.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <asm/pmac_feature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <asm/mediabay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define DRV_NAME "ide-pmac"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #undef IDE_PMAC_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define DMA_WAIT_TIMEOUT 50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) typedef struct pmac_ide_hwif {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) unsigned long regbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) int kind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) int aapl_bus_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) unsigned broken_dma : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) unsigned broken_dma_warn : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) struct device_node* node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) struct macio_dev *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) u32 timings[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) volatile u32 __iomem *kauai_fcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) ide_hwif_t *hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /* These fields duplicate what is in hwif. We currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * can't use the hwif ones because of some assumptions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * made by the generic code about the kind of DMA controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * and the format of the DMA table. This will have to be fixed though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) volatile struct dbdma_regs __iomem * dma_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) struct dbdma_cmd* dma_table_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) } pmac_ide_hwif_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) controller_ohare, /* OHare based */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) controller_heathrow, /* Heathrow/Paddington */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) controller_kl_ata3, /* KeyLargo ATA-3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) controller_kl_ata4, /* KeyLargo ATA-4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) controller_un_ata6, /* UniNorth2 ATA-6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) controller_k2_ata6, /* K2 ATA-6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) controller_sh_ata6, /* Shasta ATA-6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) static const char* model_name[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) "OHare ATA", /* OHare based */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) "Heathrow ATA", /* Heathrow/Paddington */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * Extra registers, both 32-bit little-endian
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define IDE_TIMING_CONFIG 0x200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define IDE_INTERRUPT 0x300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) /* Kauai (U2) ATA has different register setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define IDE_KAUAI_PIO_CONFIG 0x200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define IDE_KAUAI_ULTRA_CONFIG 0x210
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define IDE_KAUAI_POLL_CONFIG 0x220
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * Timing configuration register definitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) /* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define IDE_SYSCLK_NS 30 /* 33 MHz cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define IDE_SYSCLK_66_NS 15 /* 66 MHz cell */
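/*
 * For example, SYSCLK_TICKS(150) == 5 and SYSCLK_TICKS_66(150) == 10:
 * both macros are simply a ceiling division of the requested time in
 * nanoseconds by the cell's clock period.
 */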
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /* 133 MHz cell, found in Shasta.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * See the comments about the 100 MHz UniNorth 2 cell below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * Note that PIO_MASK and MDMA_MASK seem to overlap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define TR_133_PIOREG_PIO_MASK 0xff000fff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define TR_133_PIOREG_MDMA_MASK 0x00fff800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define TR_133_UDMAREG_UDMA_EN 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* 100 MHz cell, found in UniNorth 2. I don't have much information about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * this one yet; it appears as a PCI device (106b/0033) on the UniNorth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * internal PCI bus and its clock is controlled like gem or fw. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * appears to be an evolution of the KeyLargo ATA-4 cell, with the timing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * register extended to two 32-bit registers and a similar DBDMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * Other registers seem to exist but I can't tell much about them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * So far, I'm using pre-calculated tables for this cell, extracted from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * the values used by the Mac OS X driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) * The "PIO" register controls PIO and MDMA timings, the "ULTRA"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * register controls the UDMA timings. At least, it seems bit 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * of the latter enables UDMA vs. MDMA, and bits 4..7 are the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * cycle time in units of 10ns. Bits 8..15 are used, but I don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) * know their meaning yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define TR_100_PIOREG_PIO_MASK 0xff000fff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define TR_100_PIOREG_MDMA_MASK 0x00fff000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #define TR_100_UDMAREG_UDMA_EN 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) /* 66 MHz cell, found in KeyLargo. Can do Ultra DMA modes 0 to 2 on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * 40-conductor cable and up to mode 4 on an 80-conductor one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * The clock unit is 15ns (66 MHz).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * Three values can be programmed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * - Write data setup, which appears to match the cycle time. They
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * also call it DIOW setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * - Ready to pause time (from the spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * - Address setup. That one is weird. I don't see where exactly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * it fits in UDMA cycles; I got its name from an obscure piece
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) * of commented-out code in Darwin. They leave it at 0 and we do as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * well, despite a comment that suggests it has a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) * minimum value of 45ns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * Apple also adds 60ns to the write data setup (or cycle time?) on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) * reads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define TR_66_UDMA_MASK 0xfff00000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) #define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000 /* Address setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #define TR_66_UDMA_ADDRSETUP_SHIFT 29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) #define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) #define TR_66_UDMA_RDY2PAUS_SHIFT 25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) #define TR_66_UDMA_WRDATASETUP_SHIFT 21
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #define TR_66_MDMA_MASK 0x000ffc00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) #define TR_66_MDMA_RECOVERY_MASK 0x000f8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #define TR_66_MDMA_RECOVERY_SHIFT 15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) #define TR_66_MDMA_ACCESS_MASK 0x00007c00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #define TR_66_MDMA_ACCESS_SHIFT 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #define TR_66_PIO_MASK 0x000003ff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) #define TR_66_PIO_RECOVERY_MASK 0x000003e0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) #define TR_66_PIO_RECOVERY_SHIFT 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) #define TR_66_PIO_ACCESS_MASK 0x0000001f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) #define TR_66_PIO_ACCESS_SHIFT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /* 33 MHz cell, found in OHare, Heathrow (& Paddington) and KeyLargo.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * Can do PIO & MDMA modes; the clock unit is 30ns (33 MHz).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) * The access time and recovery time can be programmed. Some older
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * the same here for safety against broken old hardware ;)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * The HalfTick bit, when set, adds half a clock (15ns) to the access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * time and removes one from recovery. It's not supported on the KeyLargo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * implementation AFAIK. The E bit appears to be set for PIO mode 0 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * is used to reach the long timings used in this mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) #define TR_33_MDMA_MASK 0x003ff800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) #define TR_33_MDMA_RECOVERY_MASK 0x001f0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) #define TR_33_MDMA_RECOVERY_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #define TR_33_MDMA_ACCESS_MASK 0x0000f800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #define TR_33_MDMA_ACCESS_SHIFT 11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) #define TR_33_MDMA_HALFTICK 0x00200000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) #define TR_33_PIO_MASK 0x000007ff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) #define TR_33_PIO_E 0x00000400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) #define TR_33_PIO_RECOVERY_MASK 0x000003e0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) #define TR_33_PIO_RECOVERY_SHIFT 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) #define TR_33_PIO_ACCESS_MASK 0x0000001f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) #define TR_33_PIO_ACCESS_SHIFT 0
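/*
 * Illustration only (hypothetical tick counts): accessTicks = 5 and
 * recTicks = 3 would pack into the PIO field as
 * (5 << TR_33_PIO_ACCESS_SHIFT) | (3 << TR_33_PIO_RECOVERY_SHIFT) == 0x65.
 */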
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * Interrupt register definitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) #define IDE_INTR_DMA 0x80000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) #define IDE_INTR_DEVICE 0x40000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * FCR Register on Kauai. Not sure what bit 0x4 is ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) #define KAUAI_FCR_UATA_MAGIC 0x00000004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) #define KAUAI_FCR_UATA_RESET_N 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) #define KAUAI_FCR_UATA_ENABLE 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /* Rounded Multiword DMA timings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * I gave up finding a generic formula for all controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * types and instead built tables based on timing values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * used by Apple in Darwin's implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) struct mdma_timings_t {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) int accessTime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) int recoveryTime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) int cycleTime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) };
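/* Each table below is sorted by decreasing cycleTime, holds all three
 * values in nanoseconds and is terminated by an all-zero sentinel entry.
 */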
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) static struct mdma_timings_t mdma_timings_33[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) { 240, 240, 480 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) { 180, 180, 360 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) { 135, 135, 270 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) { 120, 120, 240 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) { 105, 105, 210 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) { 90, 90, 180 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) { 75, 75, 150 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) { 75, 45, 120 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) { 0, 0, 0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) static struct mdma_timings_t mdma_timings_33k[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) { 240, 240, 480 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) { 180, 180, 360 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) { 150, 150, 300 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) { 120, 120, 240 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) { 90, 120, 210 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) { 90, 90, 180 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) { 90, 60, 150 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) { 90, 30, 120 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) { 0, 0, 0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) static struct mdma_timings_t mdma_timings_66[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) { 240, 240, 480 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) { 180, 180, 360 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) { 135, 135, 270 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) { 120, 120, 240 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) { 105, 105, 210 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) { 90, 90, 180 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) { 90, 75, 165 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) { 75, 45, 120 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) { 0, 0, 0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) /* KeyLargo ATA-4 Ultra DMA timings (rounded) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) int addrSetup; /* ??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) int rdy2pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) int wrDataSetup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) } kl66_udma_timings[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) { 0, 180, 120 }, /* Mode 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) { 0, 150, 90 }, /* 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) { 0, 120, 60 }, /* 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) { 0, 90, 45 }, /* 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) { 0, 90, 30 } /* 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) /* UniNorth 2 ATA/100 timings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) struct kauai_timing {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) int cycle_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) u32 timing_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) static struct kauai_timing kauai_pio_timings[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) { 930 , 0x08000fff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) { 600 , 0x08000a92 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) { 383 , 0x0800060f },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) { 360 , 0x08000492 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) { 330 , 0x0800048f },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) { 300 , 0x080003cf },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) { 270 , 0x080003cc },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) { 240 , 0x0800038b },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) { 239 , 0x0800030c },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) { 180 , 0x05000249 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) { 120 , 0x04000148 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) { 0 , 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) static struct kauai_timing kauai_mdma_timings[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) { 1260 , 0x00fff000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) { 480 , 0x00618000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) { 360 , 0x00492000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) { 270 , 0x0038e000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) { 240 , 0x0030c000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) { 210 , 0x002cb000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) { 180 , 0x00249000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) { 150 , 0x00209000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) { 120 , 0x00148000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) { 0 , 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) static struct kauai_timing kauai_udma_timings[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) { 120 , 0x000070c0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) { 90 , 0x00005d80 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) { 60 , 0x00004a60 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) { 45 , 0x00003a50 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) { 30 , 0x00002a30 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) { 20 , 0x00002921 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) { 0 , 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) };
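/* This matches the comment above the TR_100_* definitions: bits 4..7 of
 * the UDMA entries roughly track the cycle time in units of 10ns (e.g.
 * 0xc in the 120ns entry, 0x6 in the 60ns one, 0x2 in the 20ns one).
 */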
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) static struct kauai_timing shasta_pio_timings[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) { 930 , 0x08000fff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) { 600 , 0x0A000c97 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) { 383 , 0x07000712 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) { 360 , 0x040003cd },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) { 330 , 0x040003cd },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) { 300 , 0x040003cd },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) { 270 , 0x040003cd },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) { 240 , 0x040003cd },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) { 239 , 0x040003cd },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) { 180 , 0x0400028b },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) { 120 , 0x0400010a },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) { 0 , 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) static struct kauai_timing shasta_mdma_timings[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) { 1260 , 0x00fff000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) { 480 , 0x00820800 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) { 360 , 0x00820800 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) { 270 , 0x00820800 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) { 240 , 0x00820800 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) { 210 , 0x00820800 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) { 180 , 0x00820800 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) { 150 , 0x0028b000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) { 120 , 0x001ca000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) { 0 , 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) static struct kauai_timing shasta_udma133_timings[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) { 120 , 0x00035901, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) { 90 , 0x000348b1, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) { 60 , 0x00033881, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) { 45 , 0x00033861, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) { 30 , 0x00033841, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) { 20 , 0x00033031, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) { 15 , 0x00033021, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) { 0 , 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
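/*
 * Look up the timing register value for a requested cycle time. The
 * tables above are sorted by decreasing cycle_time and end with a
 * { 0, 0 } sentinel; we return the entry for the smallest supported
 * cycle time that is still >= the requested one, clamping to the first
 * (slowest) or last (fastest) real entry when the request falls outside
 * the table. For example, kauai_lookup_timing(kauai_udma_timings, 60)
 * returns 0x00004a60. BUG() should only trigger on an empty table.
 */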
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) static inline u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) for (i=0; table[i].cycle_time; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) if (cycle_time > table[i+1].cycle_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) return table[i].timing_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) /* allow up to 256 DBDMA commands per xfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) #define MAX_DCMDS 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * Wait 1s for disk to answer on IDE bus after a hard reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * of the device (via GPIO/FCR).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * Some devices seem to "pollute" the bus even after dropping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * the BSY bit (typically some combo drives as slave on the UDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * bus) after a hard reset. Since we hard reset all drives on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * KeyLargo ATA66, we have to keep that delay around. I may end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * up not hard resetting anymore on these and keep the delay only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * for older interfaces instead (we have to reset when coming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * from MacOS...) --BenH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) #define IDE_WAKEUP_DELAY (1*HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) #define PMAC_IDE_REG(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
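/* Note that PMAC_IDE_REG() only takes the register offset; it relies on
 * an ide_drive_t *drive being visible in the calling scope and resolves
 * the offset against that drive's memory-mapped data register.
 */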
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) * Apply the timings of the proper unit (master/slave) to the shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) * timing register when selecting that unit. This version is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) * ASICs with a single timing register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) static void pmac_ide_apply_timings(ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) if (drive->dn & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) writel(pmif->timings[0], PMAC_IDE_REG(IDE_TIMING_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) * Apply the timings of the proper unit (master/slave) to the shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * timing register when selecting that unit. This version is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * ASICs with a dual timing register (Kauai)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) static void pmac_ide_kauai_apply_timings(ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) if (drive->dn & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) writel(pmif->timings[3], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) writel(pmif->timings[0], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) writel(pmif->timings[2], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) (void)readl(PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) * Force an update of controller timing values for a given drive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) pmac_ide_do_update_timings(ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (pmif->kind == controller_sh_ata6 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) pmif->kind == controller_un_ata6 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) pmif->kind == controller_k2_ata6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) pmac_ide_kauai_apply_timings(drive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) pmac_ide_apply_timings(drive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) static void pmac_dev_select(ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) pmac_ide_apply_timings(drive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) writeb(drive->select | ATA_DEVICE_OBS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) (void __iomem *)drive->hwif->io_ports.device_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) static void pmac_kauai_dev_select(ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) pmac_ide_kauai_apply_timings(drive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) writeb(drive->select | ATA_DEVICE_OBS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) (void __iomem *)drive->hwif->io_ports.device_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) (void)readl((void __iomem *)(hwif->io_ports.data_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) + IDE_TIMING_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) (void)readl((void __iomem *)(hwif->io_ports.data_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) + IDE_TIMING_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
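/* In the two helpers above, the dummy readl() of IDE_TIMING_CONFIG reads
 * back from the same register block and thus flushes the posted writeb()
 * out to the controller before we return.
 */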
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * Old tuning function (called on hdparm -p); it sets up the drive's PIO timings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) const u8 pio = drive->pio_mode - XFER_PIO_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) u32 *timings, t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) unsigned accessTicks, recTicks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) unsigned accessTime, recTime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) unsigned int cycle_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) /* which drive is it ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) timings = &pmif->timings[drive->dn & 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) t = *timings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) cycle_time = ide_pio_cycle_time(drive, pio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) switch (pmif->kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) case controller_sh_ata6: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) /* 133 MHz cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) t = (t & ~TR_133_PIOREG_PIO_MASK) | tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) case controller_un_ata6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) case controller_k2_ata6: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /* 100 MHz cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) t = (t & ~TR_100_PIOREG_PIO_MASK) | tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) case controller_kl_ata4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) /* 66 MHz cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) recTime = cycle_time - tim->active - tim->setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) recTime = max(recTime, 150U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) accessTime = tim->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) accessTime = max(accessTime, 150U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) accessTicks = SYSCLK_TICKS_66(accessTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) accessTicks = min(accessTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) recTicks = SYSCLK_TICKS_66(recTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) recTicks = min(recTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) t = (t & ~TR_66_PIO_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) (accessTicks << TR_66_PIO_ACCESS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) (recTicks << TR_66_PIO_RECOVERY_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) default: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /* 33 MHz cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) int ebit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) recTime = cycle_time - tim->active - tim->setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) recTime = max(recTime, 150U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) accessTime = tim->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) accessTime = max(accessTime, 150U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) accessTicks = SYSCLK_TICKS(accessTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) accessTicks = min(accessTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) accessTicks = max(accessTicks, 4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) recTicks = SYSCLK_TICKS(recTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) recTicks = min(recTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) recTicks = max(recTicks, 5U) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (recTicks > 9) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) recTicks--; /* guess, but it's only for PIO0, so... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) ebit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) t = (t & ~TR_33_PIO_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) (accessTicks << TR_33_PIO_ACCESS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) (recTicks << TR_33_PIO_RECOVERY_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) if (ebit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) t |= TR_33_PIO_E;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) #ifdef IDE_PMAC_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) printk(KERN_ERR "%s: Set PIO timing for mode %d, reg: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) drive->name, pio, *timings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) *timings = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) pmac_ide_do_update_timings(drive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) }
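/* Worked example (assuming ide_timing_find_mode() reports the usual PIO4
 * figures of a 120ns cycle, 70ns active and 25ns setup): on the KeyLargo
 * ATA-4 cell both access and recovery get clamped up to 150ns, i.e. 10
 * ticks of the 66 MHz clock each, so the PIO field becomes
 * (10 << TR_66_PIO_ACCESS_SHIFT) | (10 << TR_66_PIO_RECOVERY_SHIFT) == 0x14a.
 */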
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) * Calculate KeyLargo ATA/66 UDMA timings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) set_timings_udma_ata4(u32 *timings, u8 speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (speed > XFER_UDMA_4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) *timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) (wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) (rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) (addrTicks << TR_66_UDMA_ADDRSETUP_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) TR_66_UDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) #ifdef IDE_PMAC_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) speed & 0xf, *timings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
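/* Example: for XFER_UDMA_4 (mode 4) the table gives rdy2pause = 90ns
 * (6 ticks) and wrDataSetup = 30ns (2 ticks) with addrSetup left at 0, so
 * the UDMA bits become (2 << TR_66_UDMA_WRDATASETUP_SHIFT) |
 * (6 << TR_66_UDMA_RDY2PAUS_SHIFT) | TR_66_UDMA_EN == 0x0c500000;
 * the MDMA field is cleared and the PIO bits are preserved.
 */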
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * Calculate Kauai ATA/100 UDMA timings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) struct ide_timing *t = ide_timing_find_mode(speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) u32 tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) if (speed > XFER_UDMA_5 || t == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) *ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
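/* Example (assuming the generic timing table reports the usual 20ns UDMA
 * cycle for XFER_UDMA_5): kauai_lookup_timing() returns 0x00002921, so
 * the low 16 bits of the ULTRA register become 0x2921 with the enable
 * bit (TR_100_UDMAREG_UDMA_EN) set.
 */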
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * Calculate Shasta ATA/133 UDMA timings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) struct ide_timing *t = ide_timing_find_mode(speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) u32 tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (speed > XFER_UDMA_6 || t == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) *ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK) | tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) *ultra_timings = (*ultra_timings) | TR_133_UDMAREG_UDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * Calculate MDMA timings for all cells
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) u8 speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) u16 *id = drive->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) int cycleTime, accessTime = 0, recTime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) unsigned accessTicks, recTicks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) struct mdma_timings_t* tm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) /* Get default cycle time for mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) switch(speed & 0xf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) case 0: cycleTime = 480; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) case 1: cycleTime = 150; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) case 2: cycleTime = 120; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
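/* These defaults are simply the standard minimum Multiword DMA cycle
 * times: 480ns for mode 0, 150ns for mode 1 and 120ns for mode 2.
 */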
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /* Check if drive provides explicit DMA cycle time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if ((id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_DMA_TIME])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) cycleTime = max_t(int, id[ATA_ID_EIDE_DMA_TIME], cycleTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) /* OHare limits according to some old Apple sources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if ((intf_type == controller_ohare) && (cycleTime < 150))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) cycleTime = 150;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) /* Get the proper timing array for this controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) switch(intf_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) case controller_sh_ata6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) case controller_un_ata6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) case controller_k2_ata6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) case controller_kl_ata4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) tm = mdma_timings_66;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) case controller_kl_ata3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) tm = mdma_timings_33k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) tm = mdma_timings_33;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (tm != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) /* Lookup matching access & recovery times */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) i = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) if (tm[i+1].cycleTime < cycleTime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
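/* Note: this walk assumes tm[0].cycleTime >= cycleTime, which holds for
 * the 480ns mode 0 ceiling unless a drive reports an even larger
 * ATA_ID_EIDE_DMA_TIME; otherwise i would still be -1 here.
 */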
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) cycleTime = tm[i].cycleTime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) accessTime = tm[i].accessTime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) recTime = tm[i].recoveryTime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) #ifdef IDE_PMAC_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) printk(KERN_ERR "%s: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) drive->name, cycleTime, accessTime, recTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) switch(intf_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) case controller_sh_ata6: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /* 133 MHz cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) *timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) case controller_un_ata6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) case controller_k2_ata6: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /* 100 MHz cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) case controller_kl_ata4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /* 66 MHz cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) accessTicks = SYSCLK_TICKS_66(accessTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) accessTicks = min(accessTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) accessTicks = max(accessTicks, 0x1U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) recTicks = SYSCLK_TICKS_66(recTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) recTicks = min(recTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) recTicks = max(recTicks, 0x3U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /* Clear out mdma bits and disable udma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) *timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) (accessTicks << TR_66_MDMA_ACCESS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) (recTicks << TR_66_MDMA_RECOVERY_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) case controller_kl_ata3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* 33 MHz cell on KeyLargo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) accessTicks = SYSCLK_TICKS(accessTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) accessTicks = max(accessTicks, 1U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) accessTicks = min(accessTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) accessTime = accessTicks * IDE_SYSCLK_NS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) recTicks = SYSCLK_TICKS(recTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) recTicks = max(recTicks, 1U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) recTicks = min(recTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) *timings = ((*timings) & ~TR_33_MDMA_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) default: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) /* 33 MHz cell on others */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) int halfTick = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) int origAccessTime = accessTime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) int origRecTime = recTime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) accessTicks = SYSCLK_TICKS(accessTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) accessTicks = max(accessTicks, 1U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) accessTicks = min(accessTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) accessTime = accessTicks * IDE_SYSCLK_NS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) recTicks = SYSCLK_TICKS(recTime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) recTicks = max(recTicks, 2U) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) recTicks = min(recTicks, 0x1fU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) recTime = (recTicks + 1) * IDE_SYSCLK_NS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if ((accessTicks > 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) halfTick = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) accessTicks--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) *timings = ((*timings) & ~TR_33_MDMA_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (halfTick)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) *timings |= TR_33_MDMA_HALFTICK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) #ifdef IDE_PMAC_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) drive->name, speed & 0xf, *timings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) u32 *timings, *timings2, tl[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) u8 unit = drive->dn & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) const u8 speed = drive->dma_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) timings = &pmif->timings[unit];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) timings2 = &pmif->timings[unit+2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /* Copy timings to local image */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) tl[0] = *timings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) tl[1] = *timings2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (speed >= XFER_UDMA_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (pmif->kind == controller_kl_ata4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) ret = set_timings_udma_ata4(&tl[0], speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) else if (pmif->kind == controller_un_ata6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) || pmif->kind == controller_k2_ata6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) else if (pmif->kind == controller_sh_ata6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /* Apply timings to controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) *timings = tl[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) *timings2 = tl[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) pmac_ide_do_update_timings(drive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * Blast some well known "safe" values to the timing registers at init or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * wakeup from sleep time, before we do real calculation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) sanitize_timings(pmac_ide_hwif_t *pmif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) unsigned int value, value2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) switch(pmif->kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) case controller_sh_ata6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) value = 0x0a820c97;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) value2 = 0x00033031;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) case controller_un_ata6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) case controller_k2_ata6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) value = 0x08618a92;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) value2 = 0x00002921;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) case controller_kl_ata4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) value = 0x0008438c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) case controller_kl_ata3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) value = 0x00084526;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) case controller_heathrow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) case controller_ohare:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) value = 0x00074526;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) pmif->timings[0] = pmif->timings[1] = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) pmif->timings[2] = pmif->timings[3] = value2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
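/* Tell whether this interface sits in a media bay */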
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) static int on_media_bay(pmac_ide_hwif_t *pmif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return pmif->mdev && pmif->mdev->media_bay != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* Suspend call back, should be called after the child devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * have actually been suspended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* We clear the timings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) pmif->timings[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) pmif->timings[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) disable_irq(pmif->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* The media bay will handle itself just fine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (on_media_bay(pmif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Kauai has bus control FCRs directly here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (pmif->kauai_fcr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) u32 fcr = readl(pmif->kauai_fcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) writel(fcr, pmif->kauai_fcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Disable the bus on older machines and the cell on kauai */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /* Resume call back, should be called before the child devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * are resumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (!on_media_bay(pmif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /* Kauai has it different */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (pmif->kauai_fcr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) u32 fcr = readl(pmif->kauai_fcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) fcr |= KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) writel(fcr, pmif->kauai_fcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* Sanitize drive timings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) sanitize_timings(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) enable_irq(pmif->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
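/*
 * Report 40 vs. 80 conductor cable based on the device-tree "cable-type"
 * property, with quirks for PowerBooks and G5 (K2/Shasta) controllers.
 */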
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct device_node *np = pmif->node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) const char *cable = of_get_property(np, "cable-type", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct device_node *root = of_find_node_by_path("/");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) const char *model = of_get_property(root, "model", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) of_node_put(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* Get cable type from device-tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (cable && !strncmp(cable, "80-", 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /* Some drives fail to detect 80c cable in PowerBook */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* These machine use proprietary short IDE cable anyway */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!strncmp(model, "PowerBook", 9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return ATA_CBL_PATA40_SHORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return ATA_CBL_PATA80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * G5's seem to have incorrect cable type in device-tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Let's assume they have a 80 conductor cable, this seem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * to be always the case unless the user mucked around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (of_device_is_compatible(np, "K2-UATA") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) of_device_is_compatible(np, "shasta-ata"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return ATA_CBL_PATA80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return ATA_CBL_PATA40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
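/*
 * Per-drive init: on a media-bay interface, only allow probing when the
 * bay currently holds a CD drive.
 */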
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static void pmac_ide_init_dev(ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (on_media_bay(pmif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) drive->dev_flags &= ~IDE_DFLAG_NOPROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) drive->dev_flags |= IDE_DFLAG_NOPROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) static const struct ide_tp_ops pmac_tp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) .exec_command = pmac_exec_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) .read_status = ide_read_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) .read_altstatus = ide_read_altstatus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) .write_devctl = pmac_write_devctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) .dev_select = pmac_dev_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) .tf_load = ide_tf_load,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) .tf_read = ide_tf_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) .input_data = ide_input_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) .output_data = ide_output_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static const struct ide_tp_ops pmac_ata6_tp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) .exec_command = pmac_exec_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) .read_status = ide_read_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) .read_altstatus = ide_read_altstatus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .write_devctl = pmac_write_devctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) .dev_select = pmac_kauai_dev_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) .tf_load = ide_tf_load,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) .tf_read = ide_tf_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) .input_data = ide_input_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .output_data = ide_output_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static const struct ide_port_ops pmac_ide_ata4_port_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .init_dev = pmac_ide_init_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .set_pio_mode = pmac_ide_set_pio_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .set_dma_mode = pmac_ide_set_dma_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .cable_detect = pmac_ide_cable_detect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static const struct ide_port_ops pmac_ide_port_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) .init_dev = pmac_ide_init_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) .set_pio_mode = pmac_ide_set_pio_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) .set_dma_mode = pmac_ide_set_dma_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) static const struct ide_dma_ops pmac_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static const struct ide_port_info pmac_port_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) .init_dma = pmac_ide_init_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) .chipset = ide_pmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .tp_ops = &pmac_tp_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) .port_ops = &pmac_ide_port_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) .dma_ops = &pmac_dma_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) IDE_HFLAG_POST_SET_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) IDE_HFLAG_MMIO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) IDE_HFLAG_UNMASK_IRQS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) .pio_mask = ATA_PIO4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) .mwdma_mask = ATA_MWDMA2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * Setup, register & probe an IDE channel driven by this driver, this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * called by one of the 2 probe functions (macio or PCI).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct device_node *np = pmif->node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) const int *bidp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct ide_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct ide_hw *hws[] = { hw };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct ide_port_info d = pmac_port_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) pmif->broken_dma = pmif->broken_dma_warn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (of_device_is_compatible(np, "shasta-ata")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) pmif->kind = controller_sh_ata6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) d.tp_ops = &pmac_ata6_tp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) d.port_ops = &pmac_ide_ata4_port_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) d.udma_mask = ATA_UDMA6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) } else if (of_device_is_compatible(np, "kauai-ata")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) pmif->kind = controller_un_ata6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) d.tp_ops = &pmac_ata6_tp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) d.port_ops = &pmac_ide_ata4_port_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) d.udma_mask = ATA_UDMA5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) } else if (of_device_is_compatible(np, "K2-UATA")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) pmif->kind = controller_k2_ata6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) d.tp_ops = &pmac_ata6_tp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) d.port_ops = &pmac_ide_ata4_port_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) d.udma_mask = ATA_UDMA5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) } else if (of_device_is_compatible(np, "keylargo-ata")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (of_node_name_eq(np, "ata-4")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) pmif->kind = controller_kl_ata4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) d.port_ops = &pmac_ide_ata4_port_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) d.udma_mask = ATA_UDMA4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) pmif->kind = controller_kl_ata3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) } else if (of_device_is_compatible(np, "heathrow-ata")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) pmif->kind = controller_heathrow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) pmif->kind = controller_ohare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) pmif->broken_dma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) bidp = of_get_property(np, "AAPL,bus-id", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) pmif->aapl_bus_id = bidp ? *bidp : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* On Kauai-type controllers, we make sure the FCR is correct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (pmif->kauai_fcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) writel(KAUAI_FCR_UATA_MAGIC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) KAUAI_FCR_UATA_RESET_N |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /* Make sure we have sane timings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) sanitize_timings(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* If we are on a media bay, wait for it to settle and lock it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (pmif->mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) lock_media_bay(pmif->mdev->media_bay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) host = ide_host_alloc(&d, hws, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (host == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) pmif->hwif = host->ports[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (on_media_bay(pmif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /* Fixup bus ID for media bay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (!bidp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) pmif->aapl_bus_id = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) } else if (pmif->kind == controller_ohare) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /* The code below is having trouble on some ohare machines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * (timing related ?). Until I can put my hand on one of these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * units, I keep the old way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /* This is necessary to enable IDE when net-booting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) "bus ID %d%s, irq %d\n", model_name[pmif->kind],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) rc = ide_host_register(host, &d, hws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) pmif->hwif = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (pmif->mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) unlock_media_bay(pmif->mdev->media_bay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (rc && host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) ide_host_free(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
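/*
 * Taskfile registers are spaced 0x10 apart from the base, with the
 * control register at base + 0x160.
 */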
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static void pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) for (i = 0; i < 8; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) hw->io_ports_array[i] = base + i * 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) hw->io_ports.ctl_addr = base + 0x160;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * Attach to a macio probed interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static int pmac_ide_macio_attach(struct macio_dev *mdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) const struct of_device_id *match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unsigned long regbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) pmac_ide_hwif_t *pmif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) int irq, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct ide_hw hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (pmif == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (macio_resource_count(mdev) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) printk(KERN_WARNING "ide-pmac: no address for %pOF\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) mdev->ofdev.dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) goto out_free_pmif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* Request memory resource for IO ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) "%pOF!\n", mdev->ofdev.dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) goto out_free_pmif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /* XXX This is bogus. Should be fixed in the registry by checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * the kind of host interrupt controller, a bit like gatwick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * fixes in irq.c. That works well enough for the single case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * where that happens though...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (macio_irq_count(mdev) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) printk(KERN_WARNING "ide-pmac: no intrs for device %pOF, using "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) "13\n", mdev->ofdev.dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) irq = irq_create_mapping(NULL, 13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) irq = macio_irq(mdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) base = ioremap(macio_resource_start(mdev, 0), 0x400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) regbase = (unsigned long) base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) pmif->mdev = mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) pmif->node = mdev->ofdev.dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) pmif->regbase = regbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) pmif->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) pmif->kauai_fcr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (macio_resource_count(mdev) >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) printk(KERN_WARNING "ide-pmac: can't request DMA "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) "resource for %pOF!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) mdev->ofdev.dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) pmif->dma_regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) dev_set_drvdata(&mdev->ofdev.dev, pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) memset(&hw, 0, sizeof(hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) pmac_ide_init_ports(&hw, pmif->regbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) hw.irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) hw.dev = &mdev->bus->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) hw.parent = &mdev->ofdev.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) rc = pmac_ide_setup_device(pmif, &hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* The inteface is released to the common IDE layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) dev_set_drvdata(&mdev->ofdev.dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) iounmap(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (pmif->dma_regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) iounmap(pmif->dma_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) macio_release_resource(mdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) macio_release_resource(mdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) kfree(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) out_free_pmif:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) kfree(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
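/* macio suspend hook: suspend the interface and record the new power state */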
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (mesg.event != mdev->ofdev.dev.power.power_state.event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) && (mesg.event & PM_EVENT_SLEEP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) rc = pmac_ide_do_suspend(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) mdev->ofdev.dev.power.power_state = mesg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
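/* macio resume hook: resume the interface and mark the device powered on */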
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) pmac_ide_macio_resume(struct macio_dev *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) rc = pmac_ide_do_resume(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) mdev->ofdev.dev.power.power_state = PMSG_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * Attach to a PCI probed interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static int pmac_ide_pci_attach(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) pmac_ide_hwif_t *pmif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) unsigned long rbase, rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct ide_hw hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) np = pci_device_to_OF_node(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (np == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (pmif == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (pci_enable_device(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) "%pOF\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) goto out_free_pmif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (pci_request_regions(pdev, "Kauai ATA")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) "%pOF\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) goto out_free_pmif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) pmif->mdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) pmif->node = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) rbase = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) rlen = pci_resource_len(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) base = ioremap(rbase, rlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) pmif->regbase = (unsigned long) base + 0x2000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) pmif->dma_regs = base + 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) pmif->kauai_fcr = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) pmif->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) pci_set_drvdata(pdev, pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) memset(&hw, 0, sizeof(hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) pmac_ide_init_ports(&hw, pmif->regbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) hw.irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) hw.dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) rc = pmac_ide_setup_device(pmif, &hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* The inteface is released to the common IDE layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) iounmap(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) kfree(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) out_free_pmif:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) kfree(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
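/* PCI (Kauai) suspend hook: suspend the interface and record the new power state */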
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (mesg.event != pdev->dev.power.power_state.event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) && (mesg.event & PM_EVENT_SLEEP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) rc = pmac_ide_do_suspend(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) pdev->dev.power.power_state = mesg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
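/* PCI (Kauai) resume hook: resume the interface and mark the device powered on */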
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) pmac_ide_pci_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) rc = pmac_ide_do_resume(pmif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) pdev->dev.power.power_state = PMSG_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) #ifdef CONFIG_PMAC_MEDIABAY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) switch(mb_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) case MB_CD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (!pmif->hwif->present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) ide_port_scan(pmif->hwif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (pmif->hwif->present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ide_port_unregister_devices(pmif->hwif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) #endif /* CONFIG_PMAC_MEDIABAY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) static struct of_device_id pmac_ide_macio_match[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) .name = "IDE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) .name = "ATA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) .type = "ide",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) .type = "ata",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static struct macio_driver pmac_ide_macio_driver =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) .name = "ide-pmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .of_match_table = pmac_ide_macio_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .probe = pmac_ide_macio_attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .suspend = pmac_ide_macio_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) .resume = pmac_ide_macio_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) #ifdef CONFIG_PMAC_MEDIABAY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) .mediabay_event = pmac_ide_macio_mb_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static const struct pci_device_id pmac_ide_pci_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static struct pci_driver pmac_ide_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) .name = "ide-pmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) .id_table = pmac_ide_pci_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) .probe = pmac_ide_pci_attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) .suspend = pmac_ide_pci_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) .resume = pmac_ide_pci_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
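/*
 * Register the macio and PCI drivers; CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
 * selects which bus gets probed first.
 */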
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) int __init pmac_ide_probe(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (!machine_is(powermac))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) #ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) error = pci_register_driver(&pmac_ide_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) error = macio_register_driver(&pmac_ide_macio_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) pci_unregister_driver(&pmac_ide_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) error = macio_register_driver(&pmac_ide_macio_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) error = pci_register_driver(&pmac_ide_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) macio_unregister_driver(&pmac_ide_macio_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * pmac_ide_build_dmatable builds the DBDMA command list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * for a transfer and sets the DBDMA channel to point to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct dbdma_cmd *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) int wr = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) int i = cmd->sg_nents, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) /* DMA table is already aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) table = (struct dbdma_cmd *) pmif->dma_table_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /* Make sure DMA controller is stopped (necessary ?) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) while (readl(&dma->status) & RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* Build DBDMA commands list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) sg = hwif->sg_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) while (i && sg_dma_len(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) u32 cur_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) u32 cur_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) cur_addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) cur_len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (pmif->broken_dma_warn == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) printk(KERN_WARNING "%s: DMA on non aligned address, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) "switching to PIO on Ohare chipset\n", drive->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) pmif->broken_dma_warn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) while (cur_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (count++ >= MAX_DCMDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) printk(KERN_WARNING "%s: DMA table too small\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) drive->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) table->command = cpu_to_le16(wr? OUTPUT_MORE: INPUT_MORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) table->req_count = cpu_to_le16(tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) table->phy_addr = cpu_to_le32(cur_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) table->cmd_dep = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) table->xfer_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) table->res_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) cur_addr += tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) cur_len -= tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) ++table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /* convert the last command to an input/output last command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) table[-1].command = cpu_to_le16(wr? OUTPUT_LAST: INPUT_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /* add the stop command to the end of the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) memset(table, 0, sizeof(struct dbdma_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) table->command = cpu_to_le16(DBDMA_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) writel(hwif->dmatable_dma, &dma->cmdptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return 0; /* revert to PIO for this request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * Prepare a DMA transfer. We build the DMA table, adjust the timings for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (pmac_ide_build_dmatable(drive, cmd) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) /* Apple adds 60ns to wrDataSetup on reads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) writel(pmif->timings[unit] + (write ? 0 : 0x00800000UL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) PMAC_IDE_REG(IDE_TIMING_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * Kick the DMA controller into life after the DMA command has been issued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * to the drive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) pmac_ide_dma_start(ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) volatile struct dbdma_regs __iomem *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) dma = pmif->dma_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) writel((RUN << 16) | RUN, &dma->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /* Make sure it gets to the controller right now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) (void)readl(&dma->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * After a DMA transfer, make sure the controller is stopped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) pmac_ide_dma_end (ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) u32 dstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) dstat = readl(&dma->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) writel(((RUN|WAKE|DEAD) << 16), &dma->control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /* verify good dma status. we don't check for ACTIVE beeing 0. We should...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * in theory, but with ATAPI decices doing buffer underruns, that would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * cause us to disable DMA, which isn't what we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return (dstat & (RUN|DEAD)) != RUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
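/*
 * A note on the DBDMA control register writes above: the upper 16 bits of
 * the word written select which control bits to change, and the lower 16
 * bits give their new values.  As a hedged sketch (the helper below is
 * hypothetical, not part of this driver), the idiom could be wrapped as:
 *
 *	static inline void dbdma_set_bits(volatile struct dbdma_regs __iomem *dma,
 *					  u16 mask, u16 val)
 *	{
 *		writel(((u32)mask << 16) | val, &dma->control);
 *		(void)readl(&dma->control);	// flush the posted write
 *	}
 *
 * With it, pmac_ide_dma_start() amounts to dbdma_set_bits(dma, RUN, RUN),
 * while pmac_ide_dma_end() clears RUN, WAKE and DEAD by passing them in the
 * mask with a value of 0.
 */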
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * Check that the interrupt we got was for us. We can't always know this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * for sure with these Apple interfaces (well, we could on the recent ones,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * but that's not implemented yet). On the other hand, we don't have shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * interrupts, so it's not really a problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) pmac_ide_dma_test_irq (ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) unsigned long status, timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) /* We have two things to deal with here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * - The DBDMA won't stop if the command was started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * but completed with an error without transferring all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * the data. This happens when bad blocks are encountered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) * during a multi-block transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * - The DBDMA FIFO hasn't yet finished flushing to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * system memory when the disk interrupt occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /* If ACTIVE is cleared, the STOP command has been executed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * the transfer is complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) status = readl(&dma->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (!(status & ACTIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /* If the DBDMA hasn't executed the STOP command yet, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * ACTIVE bit is still set. We assume that we aren't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * sharing interrupts (which is hopefully the case with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * these controllers), so we just try to flush the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) * channel of any data still pending in the FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) writel((FLUSH << 16) | FLUSH, &dma->control);
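/* Poll for the FLUSH bit to clear; bound the wait to roughly 100us
 * (100 polls of 1us each) before warning and giving up.
 */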
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) status = readl(&dma->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if ((status & FLUSH) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (++timeout > 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) hwif->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
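/*
 * Nothing to do here: these cells don't appear to have a separate host-side
 * DMA enable bit (DMA use is presumably governed by the per-drive timing
 * registers programmed elsewhere in this driver), so the hook is a no-op.
 */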
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) pmac_ide_dma_lost_irq (ide_drive_t *drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ide_hwif_t *hwif = drive->hwif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) unsigned long status = readl(&dma->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static const struct ide_dma_ops pmac_dma_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) .dma_host_set = pmac_ide_dma_host_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) .dma_setup = pmac_ide_dma_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) .dma_start = pmac_ide_dma_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) .dma_end = pmac_ide_dma_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) .dma_test_irq = pmac_ide_dma_test_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) .dma_lost_irq = pmac_ide_dma_lost_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * Allocate the data structures needed for using DMA with an interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * and fill in the proper list of function pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) struct pci_dev *dev = to_pci_dev(hwif->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) /* We won't need pci_dev if we switch to generic consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) * DMA routines ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (dev == NULL || pmif->dma_regs == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) * Allocate space for the DBDMA commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * The +2 is +1 for the stop command and +1 to allow for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * aligning the start address to a multiple of 16 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) pmif->dma_table_cpu = dma_alloc_coherent(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) &hwif->dmatable_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (pmif->dma_table_cpu == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) printk(KERN_ERR "%s: unable to allocate DMA command list\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) hwif->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) hwif->sg_max_nents = MAX_DCMDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
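/*
 * A hedged sketch of what the "+2" above buys (the rounding expression is
 * illustrative, not necessarily the exact code used elsewhere in this
 * driver): one spare dbdma_cmd absorbs the slack from rounding the table
 * start up to a 16-byte boundary, e.g.
 *
 *	struct dbdma_cmd *table = (struct dbdma_cmd *)
 *		(((unsigned long)pmif->dma_table_cpu + 15) & ~15UL);
 *
 * and the other spare entry leaves room for the trailing stop command, so
 * MAX_DCMDS usable entries always fit.
 */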
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) module_init(pmac_ide_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) MODULE_LICENSE("GPL");