// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>

#include "libata.h"

static struct workqueue_struct *ata_sff_wq;

const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
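
/*
 * Illustrative sketch (not part of the original file): a typical SFF
 * driver inherits these defaults and overrides only what its hardware
 * needs. The "my_*" names below are hypothetical:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.sff_data_xfer	= ata_sff_data_xfer32,
 *		.set_piomode	= my_set_piomode,
 *	};
 */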

/**
 * ata_sff_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads the ATA taskfile status register for the currently selected
 * device and returns its value. This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 * ata_sff_altstatus - Read device alternate status reg
 * @ap: port where the device is
 *
 * Reads the ATA taskfile alternate status register for the
 * currently selected device and returns its value.
 *
 * Note: may NOT be used as the check_altstatus() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_sff_altstatus(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		return ap->ops->sff_check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 * ata_sff_irq_status - Check if the device is busy
 * @ap: port where the device is
 *
 * Determine if the port is currently busy. Uses altstatus
 * if available in order to avoid clearing shared IRQ status
 * when finding an IRQ source. Fortunately for us, devices
 * without a ctl register do not share interrupt lines.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		/* Not us: We are busy */
		if (status & ATA_BUSY)
			return status;
	}
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 * ata_sff_sync - Flush writes
 * @ap: Port to wait for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method, this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_sff_sync(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		ap->ops->sff_check_altstatus(ap);
	else if (ap->ioaddr.altstatus_addr)
		ioread8(ap->ioaddr.altstatus_addr);
}

/**
 * ata_sff_pause - Flush writes and wait 400ns
 * @ap: Port to pause for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method, this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 * ata_sff_dma_pause - Pause before commencing DMA
 * @ap: Port to pause for.
 *
 * Perform I/O fencing and ensure sufficient cycle delays occur
 * for the HDMA1:0 transition.
 */
void ata_sff_dma_pause(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		/*
		 * An altstatus read will cause the needed delay without
		 * messing up the IRQ status.
		 */
		ata_sff_altstatus(ap);
		return;
	}
	/*
	 * There are no DMA controllers without ctl. BUG here to ensure
	 * we never violate the HDMA1:0 transition timing and risk
	 * corruption.
	 */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

/**
 * ata_sff_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout in msecs
 * @tmout: overall timeout in msecs
 *
 * Sleep until ATA Status register bit BSY clears,
 * or a timeout occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_busy_sleep(struct ata_port *ap,
		       unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = ata_deadline(timer_start, tmout_pat);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_warn(ap,
			      "port is slow to respond, please be patient (Status 0x%x)\n",
			      status);

	timeout = ata_deadline(timer_start, tmout);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ap->ops->sff_check_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_err(ap,
			     "port failed to respond (%lu secs, Status 0x%x)\n",
			     DIV_ROUND_UP(tmout, 1000), status);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
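
/*
 * Usage sketch (illustrative, assuming the standard libata timeout
 * constants): a probe/reset path would typically poll with a short
 * "impatience" limit before committing to the long boot timeout:
 *
 *	rc = ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *	if (rc && rc != -ENODEV)
 *		return rc;
 */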

static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 * ata_sff_wait_ready - sleep until BSY clears, or timeout
 * @link: SFF link to wait ready status for
 * @deadline: deadline jiffies for the operation
 *
 * Sleep until ATA Status register bit BSY clears, or timeout
 * occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 * ata_sff_set_devctl - Write device control reg
 * @ap: port where the device is
 * @ctl: value to write
 *
 * Writes ATA taskfile device control register.
 *
 * Note: may NOT be used as the sff_set_devctl() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl)
		ap->ops->sff_set_devctl(ap, ctl);
	else
		iowrite8(ctl, ap->ioaddr.ctl_addr);
}

/**
 * ata_sff_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel. Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);
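
/*
 * Illustrative detail: ATA_DEVICE_OBS (0xA0) sets the two obsolete
 * bits (7 and 5) that older devices expect, and ATA_DEV1 (bit 4, the
 * DEV bit) selects device 1. So device 0 is selected by writing 0xA0
 * and device 1 by writing 0xB0 to the Device register.
 */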

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_sff_dev_select(), which
 * additionally provides the services of inserting the proper
 * pauses and status polling, where needed.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
			      device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			ata_msleep(ap, 150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_sff_irq_on - Enable interrupts on a port.
 * @ap: Port on which interrupts are enabled.
 *
 * Enable interrupts on a legacy IDE device using MMIO or PIO,
 * wait for idle, clear any pending interrupts.
 *
 * Note: may NOT be used as the sff_irq_on() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
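
/*
 * Illustrative detail: clearing ATA_NIEN (bit 1, nIEN) in the Device
 * Control register allows the device to assert INTRQ; the trailing
 * ata_wait_idle()/sff_irq_clear() calls then drop any interrupt that
 * was already latched before interrupts were enabled.
 */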

/**
 * ata_sff_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
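
/*
 * Note on ordering (illustrative): with LBA48, each shadow register is
 * a two-deep FIFO where the most recent write holds the low-order
 * byte. For a sector count of 0x0102, hob_nsect (0x01) must be written
 * before nsect (0x02), which is why the hob_* block above precedes the
 * plain taskfile block.
 */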

/**
 * ata_sff_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for the currently selected device
 * into @tf. Assumes the device has a fully SFF compliant task file
 * layout and behaviour. If your device does not (e.g. has a different
 * status method) then you will need to provide a replacement tf_read.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_sff_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);
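
/*
 * Illustrative detail: writing tf->ctl | ATA_HOB (bit 7 of the Device
 * Control register) switches the shadow registers to return the
 * high-order bytes of the LBA48 FIFO on the next reads; restoring
 * tf->ctl afterwards switches them back to the low-order bytes.
 */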

/**
 * ata_sff_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA command, with proper synchronization with interrupt
 * handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->sff_tf_load(ap, tf);
	ap->ops->sff_exec_command(ap, tf);
}

/**
 * ata_sff_data_xfer - Transfer data by PIO
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = qc->dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
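
/*
 * Worked example (illustrative): for buflen = 513, 256 words move via
 * the 16-bit rep accessor and the odd trailing byte goes through the
 * 2-byte pad buffer, so the function reports 514 bytes consumed
 * (words << 1) even though only 513 bytes of @buf were touched.
 */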

/**
 * ata_sff_data_xfer32 - Transfer data by PIO
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO using 32bit
 * I/O operations.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	struct ata_device *dev = qc->dev;
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(qc, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
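
/*
 * Worked example (illustrative): with ATA_PFLAG_PIO32 set and
 * buflen = 1022, 255 dwords move via the 32-bit rep accessors and the
 * 2-byte slop goes out as a single 16-bit transfer; the return value
 * is (1022 + 1) & ~1 = 1022. Without ATA_PFLAG_PIO32 the call simply
 * falls back to ata_sff_data_xfer().
 */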

static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
		unsigned int offset, size_t xfer_size)
{
	bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	unsigned char *buf;

	buf = kmap_atomic(page);
	qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
	kunmap_atomic(buf);

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);
}

/**
 * ata_pio_sector - Transfer a sector of data.
 * @qc: Command on going
 *
 * Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;

	if (!qc->cursg) {
		qc->curbytes = qc->nbytes;
		return;
	}
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/*
	 * Split the transfer when it splits a page boundary. Note that the
	 * split still has to be dword aligned like all ATA data transfers.
	 */
	WARN_ON_ONCE(offset % 4);
	if (offset + qc->sect_size > PAGE_SIZE) {
		unsigned int split_len = PAGE_SIZE - offset;

		ata_pio_xfer(qc, page, offset, split_len);
		ata_pio_xfer(qc, nth_page(page, 1), 0,
			     qc->sect_size - split_len);
	} else {
		ata_pio_xfer(qc, page, offset, qc->sect_size);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		if (!qc->cursg)
			ap->hsm_task_state = HSM_ST_LAST;
		qc->cursg_ofs = 0;
	}
}

/**
 * ata_pio_sectors - Transfer one or many sectors.
 * @qc: Command on going
 *
 * Transfer one or many sectors of data from/to the
 * ATA device for the DRQ request.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}
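
/*
 * Example (illustrative): for a READ MULTIPLE command with
 * qc->dev->multi_count = 16 and 20 sectors still outstanding, a single
 * DRQ assertion services min(20, 16) = 16 sectors in the loop above;
 * the remaining 4 sectors are handled on the next DRQ.
 */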

/**
 * atapi_send_cdb - Write CDB bytes to hardware
 * @ap: Port to which ATAPI device is attached.
 * @qc: Taskfile currently active
 *
 * When the device has indicated its readiness to accept
 * a CDB, this function is called. Send the CDB.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/*
	 * FIXME: If the CDB is for DMA, do we need to do the transition
	 * delay or is bmdma_start guaranteed to do it?
	 */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	buf = kmap_atomic(page);
	consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
	kunmap_atomic(buf);

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * atapi_pio_bytes - Transfer data from/to the ATAPI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * @qc: Command on going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * Transfer Transfer data from/to the ATAPI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) static void atapi_pio_bytes(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct ata_device *dev = qc->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct ata_eh_info *ehi = &dev->link->eh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) unsigned int ireason, bc_lo, bc_hi, bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /* Abuse qc->result_tf for temp storage of intermediate TF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * here to save some kernel stack usage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * For normal completion, qc->result_tf is not relevant. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * error, qc->result_tf is later overwritten by ata_qc_complete().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * So, the correctness of qc->result_tf is not affected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) ap->ops->sff_tf_read(ap, &qc->result_tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ireason = qc->result_tf.nsect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) bc_lo = qc->result_tf.lbam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) bc_hi = qc->result_tf.lbah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) bytes = (bc_hi << 8) | bc_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* shall be cleared to zero, indicating xfer of data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (unlikely(ireason & ATAPI_COD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) goto atapi_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* make sure transfer direction matches expected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (unlikely(do_write != i_write))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) goto atapi_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (unlikely(!bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) goto atapi_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (unlikely(__atapi_pio_bytes(qc, bytes)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ata_sff_sync(ap); /* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) atapi_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ireason, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) qc->err_mask |= AC_ERR_HSM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ap->hsm_task_state = HSM_ST_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * @ap: the target ata_port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * @qc: qc on going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * 1 if ok in workqueue, 0 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (qc->tf.flags & ATA_TFLAG_POLLING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (ap->hsm_task_state == HSM_ST_FIRST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (qc->tf.protocol == ATA_PROT_PIO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) (qc->tf.flags & ATA_TFLAG_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (ata_is_atapi(qc->tf.protocol) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * ata_hsm_qc_complete - finish a qc running on standard HSM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * @qc: Command to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * @in_wq: 1 if called from workqueue, 0 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * Finish @qc which is running on standard HSM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * If @in_wq is zero, spin_lock_irqsave(host lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * Otherwise, none on entry and grabs host lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (ap->ops->error_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (in_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* EH might have kicked in while host lock is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) qc = ata_qc_from_tag(ap, qc->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (qc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (likely(!(qc->err_mask & AC_ERR_HSM))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) ata_sff_irq_on(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ata_qc_complete(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ata_port_freeze(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) } else {
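			/* Host lock is already held by the caller here
			 * (see LOCKING above), so complete or freeze
			 * directly.
			 */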
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			ata_sff_irq_on(ap);
			ata_qc_complete(qc);
		} else
			ata_qc_complete(qc);
	}
}

/**
 *	ata_sff_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc in progress
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	int poll_next;

	lockdep_assert_held(ap->lock);

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by changing
		 * hsm_task_state to HSM_ST_ERR, letting the EH abort
		 * the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here as a workaround and proceed
			 * with sending the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. So, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by changing
			 * hsm_task_state to HSM_ST_ERR, letting the EH abort
			 * the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device. Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
						AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have. Set NODEV_HINT
				 * for it. Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. So, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		WARN(true, "ata%d: SFF host state machine in invalid state %d",
		     ap->print_id, ap->hsm_task_state);
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);

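/**
 *	ata_sff_queue_work - queue work on the SFF workqueue
 *	@work: work item to queue
 *
 *	Queue @work on the dedicated SFF workqueue (ata_sff_wq).
 */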
void ata_sff_queue_work(struct work_struct *work)
{
	queue_work(ata_sff_wq, work);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_work);

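/**
 *	ata_sff_queue_delayed_work - queue delayed work on the SFF workqueue
 *	@dwork: delayed work item to queue
 *	@delay: delay in jiffies before the work runs
 */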
void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	queue_delayed_work(ata_sff_wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);

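/**
 *	ata_sff_queue_pio_task - schedule the PIO task for a link
 *	@link: link on which the PIO command is in progress
 *	@delay: delay in msecs before the task runs
 *
 *	Record @link as the port's PIO task link and schedule
 *	ata_sff_pio_task().  Only one link per port may run the PIO
 *	task at a time.
 */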
void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
	struct ata_port *ap = link->ap;

	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	/* may fail if ata_sff_flush_pio_task() in progress */
	ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);

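/**
 *	ata_sff_flush_pio_task - cancel the PIO task and reset the HSM
 *	@ap: port whose PIO task should be flushed
 *
 *	Cancel any pending ata_sff_pio_task() and force the HSM back
 *	to HSM_ST_IDLE under the port lock.
 */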
void ata_sff_flush_pio_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_delayed_work_sync(&ap->sff_pio_task);

	/*
	 * We want to reset the HSM state to IDLE.  If we do so without
	 * grabbing the port lock, critical sections protected by it which
	 * expect the HSM state to stay stable may get surprised.  For
	 * example, we may set IDLE in between the time
	 * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
	 * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
	 */
	spin_lock_irq(ap->lock);
	ap->hsm_task_state = HSM_ST_IDLE;
	spin_unlock_irq(ap->lock);

	ap->sff_pio_task_link = NULL;

	if (ata_msg_ctl(ap))
		ata_port_dbg(ap, "%s: EXIT\n", __func__);
}

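/**
 *	ata_sff_pio_task - poll the device and advance the HSM
 *	@work: the port's sff_pio_task work item
 *
 *	Wait briefly for BSY to clear, then keep feeding the device
 *	status to ata_sff_hsm_move() for as long as it asks for
 *	another poll.  Requeues itself if the device stays busy.
 */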
static void ata_sff_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, sff_pio_task.work);
	struct ata_link *link = ap->sff_pio_task_link;
	struct ata_queued_cmd *qc;
	u8 status;
	int poll_next;

	spin_lock_irq(ap->lock);

	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
		goto out_unlock;
	}

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		spin_unlock_irq(ap->lock);
		ata_msleep(ap, 2);
		spin_lock_irq(ap->lock);

		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			goto out_unlock;
		}
	}

	/*
	 * hsm_move() may trigger another command to be processed.
	 * clean the link beforehand.
	 */
	ap->sff_pio_task_link = NULL;
	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
out_unlock:
	spin_unlock_irq(ap->lock);
}

/**
 *	ata_sff_qc_issue - issue taskfile to a SFF controller
 *	@qc: command to issue to device
 *
 *	This function issues a PIO or NODATA command to a SFF
 *	controller.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Use polling PIO if the LLD doesn't handle
	 * interrupt-driven PIO and the ATAPI CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING)
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_sff_queue_pio_task(link, 0);

		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_sff_queue_pio_task(link, 0);

			/* always send first data block using the
			 * ata_sff_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_sff_queue_pio_task(link, 0);

			/* if polling, ata_sff_pio_task() handles the
			 * rest.  otherwise, interrupt handler takes
			 * over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

/**
 *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 *	@qc: qc to fill result TF for
 *
 *	@qc is finished and result TF needs to be filled.  Fill it
 *	using ->sff_tf_read.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	true indicating that result TF is successfully filled.
 */
bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
	return true;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);

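/**
 *	ata_sff_idle_irq - account an IRQ that no command was expecting
 *	@ap: port that received the IRQ
 *
 *	RETURNS:
 *	1 if the IRQ was consumed (ATA_IRQ_TRAP debugging only),
 *	0 otherwise.
 */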
static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_warn(ap, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

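/**
 *	__ata_sff_port_intr - handle a port interrupt for one queued command
 *	@ap: port on which the interrupt arrived
 *	@qc: queued command active on @ap
 *	@hsmv_on_idle: when true, treat a BUSY status as an HSM
 *	violation rather than an idle IRQ (the BMDMA engine has
 *	already been stopped at that point)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */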
static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return ata_sff_idle_irq(ap);
		break;
	case HSM_ST_IDLE:
		return ata_sff_idle_irq(ap);
	default:
		break;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (hsmv_on_idle) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			return ata_sff_idle_irq(ap);
	}

	/* clear irq events */
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	return 1;	/* irq handled */
}

/**
 *	ata_sff_port_intr - Handle SFF port interrupt
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle port interrupt for given queued command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);

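/**
 *	__ata_sff_interrupt - shared body of the SFF interrupt handlers
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *	@port_intr: per-port interrupt handler to dispatch to
 *
 *	Walk all ports, dispatch @port_intr for each active non-polled
 *	command, and retry once to flush a spurious IRQ that no port
 *	claimed.
 */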
static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
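	/*
	 * Dispatch to @port_intr for each port with an active,
	 * non-polled command; track idle ports and ports running
	 * polled commands in per-port bitmasks for the retry pass.
	 */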
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) for (i = 0; i < host->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct ata_port *ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) qc = ata_qc_from_tag(ap, ap->link.active_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (qc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (!(qc->tf.flags & ATA_TFLAG_POLLING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) handled |= port_intr(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) polling |= 1 << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) idle |= 1 << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * If no port was expecting IRQ but the controller is actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * asserting IRQ line, nobody cared will ensue. Check IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * pending status if available and clear spurious IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (!handled && !retried) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) bool retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) for (i = 0; i < host->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct ata_port *ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (polling & (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (!ap->ops->sff_irq_check ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) !ap->ops->sff_irq_check(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (idle & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ap->ops->sff_check_status(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (ap->ops->sff_irq_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) ap->ops->sff_irq_clear(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) /* clear INTRQ and check if BUSY cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) retry |= true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * With command in flight, we can't do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * sff_irq_clear() w/o racing with completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) retried = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * ata_sff_interrupt - Default SFF ATA host interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * @irq: irq line (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * @dev_instance: pointer to our ata_host information structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * Default interrupt handler for PCI IDE devices. Calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * ata_sff_port_intr() for each port that is not disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * Obtains host lock during operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * IRQ_NONE or IRQ_HANDLED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) EXPORT_SYMBOL_GPL(ata_sff_interrupt);
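
/*
 * Example (illustrative sketch, not part of this file): a native-mode
 * driver that manages its own IRQ registration can hand
 * ata_sff_interrupt() straight to devm_request_irq(); drivers that go
 * through ata_pci_sff_activate_host() below get this wiring for free.
 * "my_drv" is a hypothetical name and "host" the driver's ata_host.
 *
 *	rc = devm_request_irq(&pdev->dev, pdev->irq, ata_sff_interrupt,
 *			      IRQF_SHARED, "my_drv", host);
 *	if (rc)
 *		return rc;
 */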
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * ata_sff_lost_interrupt - Check for an apparent lost interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * @ap: port that appears to have timed out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * Called from the libata error handlers when the core code suspects
 * an interrupt has been lost. If it has, complete anything we can and
 * then return. The interface must support altstatus for this faster
 * recovery to occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * Locking:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * Caller holds host lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) void ata_sff_lost_interrupt(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /* Only one outstanding command per SFF channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) qc = ata_qc_from_tag(ap, ap->link.active_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) /* We cannot lose an interrupt on a non-existent or polled command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return;
	/*
	 * See if the controller thinks it is still busy - if so the
	 * command isn't a lost IRQ but is still in progress.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) status = ata_sff_altstatus(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (status & ATA_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
	/*
	 * A command was running, the controller is no longer busy, and
	 * no interrupt was delivered.
	 */
	ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
	/*
	 * Run the host interrupt logic as if the interrupt had not
	 * been lost.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) ata_sff_port_intr(ap, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * ata_sff_freeze - Freeze SFF controller port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * @ap: port to freeze
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) * Freeze SFF controller port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) void ata_sff_freeze(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) ap->ctl |= ATA_NIEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) ap->last_ctl = ap->ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) ata_sff_set_devctl(ap, ap->ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
	/*
	 * Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation. Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion. Clear it.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) ap->ops->sff_check_status(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (ap->ops->sff_irq_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) ap->ops->sff_irq_clear(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) EXPORT_SYMBOL_GPL(ata_sff_freeze);
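
/*
 * Example (illustrative sketch, not part of this file): a controller
 * without an iomapped ctl_addr can still take part in freeze/thaw by
 * supplying ->sff_set_devctl(), which ata_sff_set_devctl() prefers
 * over writing ctl_addr directly. my_write_devctl() is a hypothetical
 * controller-specific accessor.
 *
 *	static void my_set_devctl(struct ata_port *ap, u8 ctl)
 *	{
 *		my_write_devctl(ap, ctl);
 *	}
 */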
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * ata_sff_thaw - Thaw SFF controller port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) * @ap: port to thaw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) * Thaw SFF controller port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) * Inherited from caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) void ata_sff_thaw(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /* clear & re-enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ap->ops->sff_check_status(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (ap->ops->sff_irq_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) ap->ops->sff_irq_clear(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) ata_sff_irq_on(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) EXPORT_SYMBOL_GPL(ata_sff_thaw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * ata_sff_prereset - prepare SFF link for reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * @link: SFF link to be reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * @deadline: deadline jiffies for the operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * SFF link @link is about to be reset. Initialize it. It first
 * calls ata_std_prereset() and waits for !BSY if the port is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * being softreset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * Kernel thread context (may sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * 0 on success, -errno otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct ata_eh_context *ehc = &link->eh_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) rc = ata_std_prereset(link, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /* if we're about to do hardreset, nothing more to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (ehc->i.action & ATA_EH_HARDRESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
	/* wait for !BSY unless the link is known to be offline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (!ata_link_offline(link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) rc = ata_sff_wait_ready(link, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (rc && rc != -ENODEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) ata_link_warn(link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) "device not ready (errno=%d), forcing hardreset\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) ehc->i.action |= ATA_EH_HARDRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) EXPORT_SYMBOL_GPL(ata_sff_prereset);
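
/*
 * Example (illustrative sketch, not part of this file): a driver with
 * extra pre-reset work typically runs its own checks first and then
 * falls through to ata_sff_prereset(). my_cable_is_ok() is a
 * hypothetical helper; returning -ENOENT tells EH to treat the link
 * as empty instead of resetting it.
 *
 *	static int my_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		if (!my_cable_is_ok(link->ap))
 *			return -ENOENT;
 *		return ata_sff_prereset(link, deadline);
 *	}
 */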
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * ata_devchk - PATA device presence detection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * @ap: ATA channel to examine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * @device: Device to examine (starting at zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) * This technique was originally described in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * Hale Landis's ATADRVR (www.ata-atapi.com), and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * later found its way into the ATA/ATAPI spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) * Write a pattern to the ATA shadow registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) * and if a device is present, it will respond by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * correctly storing and echoing back the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * ATA shadow register contents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) struct ata_ioports *ioaddr = &ap->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) u8 nsect, lbal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) ap->ops->sff_dev_select(ap, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) iowrite8(0x55, ioaddr->nsect_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) iowrite8(0xaa, ioaddr->lbal_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) iowrite8(0xaa, ioaddr->nsect_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) iowrite8(0x55, ioaddr->lbal_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) iowrite8(0x55, ioaddr->nsect_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) iowrite8(0xaa, ioaddr->lbal_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) nsect = ioread8(ioaddr->nsect_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) lbal = ioread8(ioaddr->lbal_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if ((nsect == 0x55) && (lbal == 0xaa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) return 1; /* we found a device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return 0; /* nothing found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * ata_sff_dev_classify - Parse returned ATA device signature
 * @dev: ATA device to classify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) * @present: device seems present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) * @r_err: Value of error register on completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * an ATA/ATAPI-defined set of values is placed in the ATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * shadow registers, indicating the results of device detection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * and diagnostics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * Select the ATA device, and read the values from the ATA shadow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * registers. Then parse according to the Error register value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * and the spec-defined values examined by ata_dev_classify().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) * caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) u8 *r_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) struct ata_port *ap = dev->link->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct ata_taskfile tf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) unsigned int class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) u8 err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) ap->ops->sff_dev_select(ap, dev->devno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) memset(&tf, 0, sizeof(tf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) ap->ops->sff_tf_read(ap, &tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) err = tf.feature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (r_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) *r_err = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /* see if device passed diags: continue and warn later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (err == 0)
		/* diagnostic failure: do nothing _YET_ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) else if (err == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /* do nothing */ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) else if ((dev->devno == 0) && (err == 0x81))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /* do nothing */ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return ATA_DEV_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) /* determine if device is ATA or ATAPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) class = ata_dev_classify(&tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (class == ATA_DEV_UNKNOWN) {
		/*
		 * If the device failed diagnostics, it is likely to
		 * have reported an incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * the device signature is invalid with diagnostic
		 * failure.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) class = ATA_DEV_ATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) class = ATA_DEV_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) } else if ((class == ATA_DEV_ATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) (ap->ops->sff_check_status(ap) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) class = ATA_DEV_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
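
/*
 * For reference, a sketch of the spec-defined signatures that
 * ata_dev_classify() inspects in the taskfile read above (see
 * ata_dev_classify() for the authoritative decoding):
 *
 *	lbam 0x00, lbah 0x00	ATA_DEV_ATA
 *	lbam 0x14, lbah 0xeb	ATA_DEV_ATAPI
 *	lbam 0x69, lbah 0x96	ATA_DEV_PMP
 *	lbam 0x3c, lbah 0xc3	ATA_DEV_SEMB
 */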
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * ata_sff_wait_after_reset - wait for devices to become ready after reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * @link: SFF link which is just reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * @devmask: mask of present devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * @deadline: deadline jiffies for the operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) *
 * Wait for devices attached to SFF @link to become ready after
 * reset. This includes a preceding 150ms wait to avoid accessing the
 * TF status register too early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * Kernel thread context (may sleep).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) * RETURNS:
 * 0 on success, -ENODEV if some or all of the devices in @devmask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) * don't seem to exist. -errno on other errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) struct ata_port *ap = link->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct ata_ioports *ioaddr = &ap->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) unsigned int dev0 = devmask & (1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) unsigned int dev1 = devmask & (1 << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) int rc, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) ata_msleep(ap, ATA_WAIT_AFTER_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) /* always check readiness of the master device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) rc = ata_sff_wait_ready(link, deadline);
	/*
	 * -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
	/*
	 * If device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (dev1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) ap->ops->sff_dev_select(ap, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
		/*
		 * Wait for register access. Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it. We're going to wait for !BSY anyway.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) u8 nsect, lbal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) nsect = ioread8(ioaddr->nsect_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) lbal = ioread8(ioaddr->lbal_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if ((nsect == 1) && (lbal == 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) ata_msleep(ap, 50); /* give drive a breather */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) rc = ata_sff_wait_ready(link, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (rc != -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) ret = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /* is all this really necessary? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) ap->ops->sff_dev_select(ap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (dev1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) ap->ops->sff_dev_select(ap, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (dev0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) ap->ops->sff_dev_select(ap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) struct ata_ioports *ioaddr = &ap->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (ap->ioaddr.ctl_addr) {
		/* software reset; causes dev0 to be selected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) iowrite8(ap->ctl, ioaddr->ctl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) udelay(20); /* FIXME: flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) udelay(20); /* FIXME: flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) iowrite8(ap->ctl, ioaddr->ctl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) ap->last_ctl = ap->ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
	/* wait for the port to become ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * ata_sff_softreset - reset host port via ATA SRST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * @link: ATA link to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * @classes: resulting classes of attached devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * @deadline: deadline jiffies for the operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * Reset host port using ATA SRST.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * Kernel thread context (may sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * 0 on success, -errno otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) struct ata_port *ap = link->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) unsigned int devmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) u8 err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) DPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) /* determine if device 0/1 are present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (ata_devchk(ap, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) devmask |= (1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (slave_possible && ata_devchk(ap, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) devmask |= (1 << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) /* select device 0 again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) ap->ops->sff_dev_select(ap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) /* issue bus reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) DPRINTK("about to softreset, devmask=%x\n", devmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) rc = ata_bus_softreset(ap, devmask, deadline);
	/* if the link is occupied, -ENODEV is an error too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) ata_link_err(link, "SRST failed (errno=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) /* determine by signature whether we have ATA or ATAPI devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) classes[0] = ata_sff_dev_classify(&link->device[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) devmask & (1 << 0), &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (slave_possible && err != 0x81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) classes[1] = ata_sff_dev_classify(&link->device[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) devmask & (1 << 1), &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) EXPORT_SYMBOL_GPL(ata_sff_softreset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * sata_sff_hardreset - reset host port via SATA phy reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) * @link: link to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) * @class: resulting class of attached device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * @deadline: deadline jiffies for the operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) *
 * SATA phy-reset the host port using the DET bits of the SControl
 * register, wait for !BSY and classify the attached device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) * Kernel thread context (may sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) * 0 on success, -errno otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) struct ata_eh_context *ehc = &link->eh_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) const unsigned long *timing = sata_ehc_deb_timing(ehc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) bool online;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) rc = sata_link_hardreset(link, timing, deadline, &online,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) ata_sff_check_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) *class = ata_sff_dev_classify(link->device, 1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) DPRINTK("EXIT, class=%u\n", *class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) EXPORT_SYMBOL_GPL(sata_sff_hardreset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) * ata_sff_postreset - SFF postreset callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) * @link: the target SFF ata_link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) * @classes: classes of attached devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * This function is invoked after a successful reset. It first
 * calls ata_std_postreset() and then performs SFF-specific postreset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * Kernel thread context (may sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) struct ata_port *ap = link->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) ata_std_postreset(link, classes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) /* is double-select really necessary? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (classes[0] != ATA_DEV_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) ap->ops->sff_dev_select(ap, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (classes[1] != ATA_DEV_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) ap->ops->sff_dev_select(ap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) /* bail out if no device is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) DPRINTK("EXIT, no device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /* set up device control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) ata_sff_set_devctl(ap, ap->ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) ap->last_ctl = ap->ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) EXPORT_SYMBOL_GPL(ata_sff_postreset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * @qc: command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) *
 * Drain the FIFO and device of any stuck data following a command
 * that failed to complete. In some cases this is necessary before a
 * reset will recover the device.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) struct ata_port *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /* We only need to flush incoming data when a command was running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /* Drain up to 64K of data before we give up this recovery method */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) && count < 65536; count += 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) ioread16(ap->ioaddr.data_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) /* Can become DEBUG later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
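
/*
 * Example (illustrative sketch, not part of this file): a controller
 * with its own FIFO can override ->sff_drain_fifo, reuse this helper
 * for the device-side drain, and then flush controller state.
 * my_clear_fifo() is a hypothetical controller-specific hook.
 *
 *	static void my_drain_fifo(struct ata_queued_cmd *qc)
 *	{
 *		ata_sff_drain_fifo(qc);
 *		if (qc)
 *			my_clear_fifo(qc->ap);
 *	}
 */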
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * ata_sff_error_handler - Stock error handler for SFF controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * @ap: port to handle error for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) * Stock error handler for SFF controller. It can handle both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * PATA and SATA controllers. Many controllers should be able to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) * use this EH as-is or with some added handling before and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) * after.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * Kernel thread context (may sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) void ata_sff_error_handler(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) ata_reset_fn_t softreset = ap->ops->softreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) ata_reset_fn_t hardreset = ap->ops->hardreset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct ata_queued_cmd *qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) qc = __ata_qc_from_tag(ap, ap->link.active_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) qc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) spin_lock_irqsave(ap->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) * We *MUST* do FIFO draining before we issue a reset as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * several devices helpfully clear their internal state and
	 * will lock solid if we touch the data port post-reset. Pass
	 * qc in case anyone wants to do different PIO/DMA recovery or
	 * has per-command fixups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) if (ap->ops->sff_drain_fifo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) ap->ops->sff_drain_fifo(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) spin_unlock_irqrestore(ap->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) /* ignore built-in hardresets if SCR access is not available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if ((hardreset == sata_std_hardreset ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) hardreset = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) ap->ops->postreset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) EXPORT_SYMBOL_GPL(ata_sff_error_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) * ata_sff_std_ports - initialize ioaddr with standard port offsets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) * @ioaddr: IO address structure to be initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) * Utility function which initializes data_addr, error_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) * device_addr, status_addr, and command_addr to standard offsets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * relative to cmd_addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) void ata_sff_std_ports(struct ata_ioports *ioaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) EXPORT_SYMBOL_GPL(ata_sff_std_ports);
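
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * maps its own register blocks only needs to seed cmd_addr and
 * ctl_addr before calling the helper; the remaining taskfile
 * addresses follow from the standard offsets. "cmd_base" and
 * "ctl_base" are hypothetical iomapped cookies.
 *
 *	ap->ioaddr.cmd_addr = cmd_base;
 *	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr = ctl_base;
 *	ata_sff_std_ports(&ap->ioaddr);
 */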
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static int ata_resources_present(struct pci_dev *pdev, int port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
	/* Check that the PCI resources for this channel are enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) port = port * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (pci_resource_start(pdev, port + i) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) pci_resource_len(pdev, port + i) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * ata_pci_sff_init_host - acquire native PCI ATA resources and init host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * @host: target ATA host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * Acquire native PCI ATA resources for @host and initialize the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) * first two ports of @host accordingly. Ports marked dummy are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) * skipped and allocation failure makes the port dummy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * Note that native PCI resources are valid even for legacy hosts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) * as we fix up pdev resources array early in boot, so this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) * function can be used for both native and legacy SFF hosts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * Inherited from calling layer (may sleep).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * 0 if at least one port is initialized, -ENODEV if no port is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) int ata_pci_sff_init_host(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) struct device *gdev = host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) struct pci_dev *pdev = to_pci_dev(gdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) unsigned int mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) /* request, iomap BARs and init port addresses accordingly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) struct ata_port *ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) int base = i * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) void __iomem * const *iomap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (ata_port_is_dummy(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
		/*
		 * Discard disabled ports. Some controllers show
		 * their unused channels this way. Disabled ports are
		 * made dummy.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (!ata_resources_present(pdev, i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) ap->ops = &ata_dummy_port_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) rc = pcim_iomap_regions(pdev, 0x3 << base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) dev_driver_string(gdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) dev_warn(gdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) "failed to request/iomap BARs for port %d (errno=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) i, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (rc == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) pcim_pin_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) ap->ops = &ata_dummy_port_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) host->iomap = iomap = pcim_iomap_table(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) ap->ioaddr.cmd_addr = iomap[base];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) ap->ioaddr.altstatus_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) ap->ioaddr.ctl_addr = (void __iomem *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) ata_sff_std_ports(&ap->ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) (unsigned long long)pci_resource_start(pdev, base),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) (unsigned long long)pci_resource_start(pdev, base + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) mask |= 1 << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (!mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) dev_err(gdev, "no available native port\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) * @pdev: target PCI device
 * @ppi: array of port_info, must be large enough for two ports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * @r_host: out argument for the initialized ATA host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) * all PCI resources and initialize it accordingly in one go.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) * Inherited from calling layer (may sleep).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) * 0 on success, -errno otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) int ata_pci_sff_prepare_host(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) const struct ata_port_info * const *ppi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct ata_host **r_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) struct ata_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) dev_err(&pdev->dev, "failed to allocate ATA host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) rc = ata_pci_sff_init_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) devres_remove_group(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) *r_host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) devres_release_group(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * @host: target SFF ATA host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) * @irq_handler: irq_handler used when requesting IRQ(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) * @sht: scsi_host_template to use when registering the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * This is the counterpart of ata_host_activate() for SFF ATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) * hosts. This separate helper is necessary because SFF hosts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) * use two separate interrupts in legacy mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) * Inherited from calling layer (may sleep).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) * 0 on success, -errno otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) int ata_pci_sff_activate_host(struct ata_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) irq_handler_t irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) struct scsi_host_template *sht)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct device *dev = host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) struct pci_dev *pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) const char *drv_name = dev_driver_string(host->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) int legacy_mode = 0, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) rc = ata_host_start(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) u8 tmp8, mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) /*
		 * The ATA spec says we should use legacy mode when one
		 * port is in legacy mode, but disabled ports on some
		 * PCI hosts appear as fixed legacy ports, e.g. SB600/700
		 * on which the secondary port is not wired, so ignore
		 * ports that are marked as 'dummy' during this check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		if (!ata_port_is_dummy(host->ports[0]))
			mask |= (1 << 0);
		if (!ata_port_is_dummy(host->ports[1]))
			mask |= (1 << 2);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	if (!legacy_mode && pdev->irq) {
		int i;

		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		for (i = 0; i < 2; i++) {
			if (ata_port_is_dummy(host->ports[i]))
				continue;
			ata_port_desc(host->ports[i], "irq %d", pdev->irq);
		}
	} else if (legacy_mode) {
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[0], "irq %d",
				      ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[1], "irq %d",
				      ATA_SECONDARY_IRQ(pdev));
		}
	}

	rc = ata_host_register(host, sht);
out:
	if (rc == 0)
		devres_remove_group(dev, NULL);
	else
		devres_release_group(dev, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);

static const struct ata_port_info *ata_sff_find_valid_pi(
	const struct ata_port_info * const *ppi)
{
	int i;

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++)
		if (ppi[i]->port_ops != &ata_dummy_port_ops)
			return ppi[i];

	return NULL;
}
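
/*
 * For illustration only (not part of the original file): a driver whose
 * secondary channel is unusable would typically point the unused ppi
 * slot at ata_dummy_port_info; the "my_" names below are hypothetical.
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.port_ops	= &ata_sff_port_ops,
 *	};
 *	static const struct ata_port_info *ppi[] =
 *		{ &my_port_info, &ata_dummy_port_info };
 *
 * ata_sff_find_valid_pi(ppi) then returns &my_port_info, because
 * ata_dummy_port_info carries ata_dummy_port_ops.
 */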

static int ata_pci_init_one(struct pci_dev *pdev,
			    const struct ata_port_info * const *ppi,
			    struct scsi_host_template *sht, void *host_priv,
			    int hflags, bool bmdma)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi;
	struct ata_host *host = NULL;
	int rc;

	DPRINTK("ENTER\n");

	pi = ata_sff_find_valid_pi(ppi);
	if (!pi) {
		dev_err(&pdev->dev, "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		goto out;

#ifdef CONFIG_ATA_BMDMA
	if (bmdma)
		/* prepare BMDMA host */
		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	else
#endif
		/* prepare SFF host */
		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		goto out;
	host->private_data = host_priv;
	host->flags |= hflags;

#ifdef CONFIG_ATA_BMDMA
	if (bmdma) {
		pci_set_master(pdev);
		rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
	} else
#endif
		rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
	if (rc == 0)
		devres_remove_group(&pdev->dev, NULL);
	else
		devres_release_group(&pdev->dev, NULL);

	return rc;
}

/**
 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
 * @pdev: Controller to be initialized
 * @ppi: array of port_info, must be enough for two ports
 * @sht: scsi_host_template to use when registering the host
 * @host_priv: host private_data
 * @hflag: host flags
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers and is PIO only.
 *
 * ASSUMPTION:
 * Nobody makes a single channel controller that appears solely as
 * the secondary legacy port on PCI.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative errno value on error.
 */
int ata_pci_sff_init_one(struct pci_dev *pdev,
			 const struct ata_port_info * const *ppi,
			 struct scsi_host_template *sht, void *host_priv, int hflag)
{
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
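
/*
 * A minimal sketch (illustrative, not from this file) of a PIO-only
 * driver probe built on ata_pci_sff_init_one(); all "my_pio" names are
 * hypothetical.
 *
 *	static struct scsi_host_template my_pio_sht = {
 *		ATA_BASE_SHT("my_pio"),
 *	};
 *
 *	static int my_pio_init_one(struct pci_dev *pdev,
 *				   const struct pci_device_id *id)
 *	{
 *		static const struct ata_port_info info = {
 *			.flags		= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= ATA_PIO4,
 *			.port_ops	= &ata_sff_port_ops,
 *		};
 *		const struct ata_port_info *ppi[] = { &info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &my_pio_sht,
 *					    NULL, 0);
 *	}
 */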

#endif /* CONFIG_PCI */

/*
 * BMDMA support
 */

#ifdef CONFIG_ATA_BMDMA

const struct ata_port_operations ata_bmdma_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.qc_prep		= ata_bmdma_qc_prep,
	.qc_issue		= ata_bmdma_qc_issue,

	.sff_irq_clear		= ata_bmdma_irq_clear,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,

	.port_start		= ata_bmdma_port_start,
};
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);

const struct ata_port_operations ata_bmdma32_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.sff_data_xfer		= ata_sff_data_xfer32,
	.port_start		= ata_bmdma_port_start32,
};
EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
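
/*
 * Drivers normally inherit one of the structs above and override only
 * the slots that differ; an illustrative (hypothetical) example:
 *
 *	static struct ata_port_operations my_chip_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.set_piomode	= my_chip_set_piomode,
 *		.set_dmamode	= my_chip_set_dmamode,
 *	};
 */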

/**
 * ata_bmdma_fill_sg - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 */
static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[pi].addr = cpu_to_le32(addr);
			prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
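
/*
 * Worked example for the boundary split above (illustrative numbers):
 * a 10 KiB (0x2800 byte) segment at DMA address 0x0ffff000 sits at
 * offset 0xf000 within its 64K region, so the first entry is capped
 * at 0x10000 - 0xf000 = 0x1000 bytes and the table becomes
 *
 *	PRD[0] = (0x0ffff000, 0x1000)
 *	PRD[1] = (0x10000000, 0x1800)	with ATA_PRD_EOT set
 *
 * so that no single entry crosses a 64K boundary.
 */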

/**
 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command. Perform the fill
 * so that we avoid writing any 64K-length records for
 * controllers that don't follow the spec.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 */
static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec
				   says */
				prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
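
/*
 * Worked example (illustrative numbers): a fully aligned 64K segment
 * at 0x00010000 gives len = 0x10000, hence blen = len & 0xffff == 0.
 * Instead of one entry whose 0x0000 length field would mean 64K, the
 * loop above emits two 32K entries:
 *
 *	PRD[0] = (0x00010000, 0x8000)
 *	PRD[1] = (0x00018000, 0x8000)	with ATA_PRD_EOT set
 */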

/**
 * ata_bmdma_qc_prep - Prepare taskfile for submission
 * @qc: Metadata associated with taskfile to be prepared
 *
 * Prepare ATA taskfile for submission.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	ata_bmdma_fill_sg(qc);

	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);

/**
 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
 * @qc: Metadata associated with taskfile to be prepared
 *
 * Prepare ATA taskfile for submission.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	ata_bmdma_fill_sg_dumb(qc);

	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);

/**
 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
 * @qc: command to issue to device
 *
 * This function issues a PIO, NODATA or DMA command to a
 * SFF/BMDMA controller. PIO and NODATA are handled by
 * ata_sff_qc_issue().
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);		/* set up bmdma */
		ap->ops->bmdma_start(qc);		/* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);		/* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);

/**
 * ata_bmdma_port_intr - Handle BMDMA port interrupt
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle port interrupt for given queued command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 host_stat = 0;
	bool bmdma_stopped = false;
	unsigned int handled;

	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			return ata_sff_idle_irq(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);
		bmdma_stopped = true;

		if (unlikely(host_stat & ATA_DMA_ERR)) {
			/* error when transferring data to/from memory */
			qc->err_mask |= AC_ERR_HOST_BUS;
			ap->hsm_task_state = HSM_ST_ERR;
		}
	}

	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return handled;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);

/**
 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices. Calls
 * ata_bmdma_port_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
}
EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);

/**
 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
 * @ap: port to handle error for
 *
 * Stock error handler for BMDMA controller. It can handle both
 * PATA and SATA controllers. Most BMDMA controllers should be
 * able to use this EH as-is or with some added handling before
 * and after.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	bool thaw = false;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	if (qc && ata_is_dma(qc->tf.protocol)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out. As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = true;
		}

		ap->ops->bmdma_stop(qc);

		/* if we're gonna thaw, make sure IRQ is clear */
		if (thaw) {
			ap->ops->sff_check_status(ap);
			if (ap->ops->sff_irq_clear)
				ap->ops->sff_irq_clear(ap);
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	ata_sff_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
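
/*
 * As the comment above notes, drivers may wrap this EH with extra work
 * before and after; a hypothetical sketch:
 *
 *	static void my_chip_error_handler(struct ata_port *ap)
 *	{
 *		my_chip_reset_dma_engine(ap);	(hypothetical helper)
 *		ata_bmdma_error_handler(ap);
 *	}
 */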

/**
 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
 * @qc: internal command to clean up
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ata_is_dma(qc->tf.protocol)) {
		spin_lock_irqsave(ap->lock, flags);
		ap->ops->bmdma_stop(qc);
		spin_unlock_irqrestore(ap->lock, flags);
	}
}
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/**
 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 * @ap: Port associated with this ATA transaction.
 *
 * Clear interrupt and error flags in DMA status register.
 *
 * May be used as the sff_irq_clear() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);

/**
 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
EXPORT_SYMBOL_GPL(ata_bmdma_setup);

/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue an ioread8() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expecting, so I think it is best to not add a readb()
	 * without first testing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 *
	 * FIXME: The posting of this write means I/O starts are
	 * unnecessarily delayed for MMIO
	 */
}
EXPORT_SYMBOL_GPL(ata_bmdma_start);

/**
 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 * @qc: Command we are ending DMA for
 *
 * Clears the ATA_DMA_START flag in the dma control register
 *
 * May be used as the bmdma_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
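
/*
 * An illustrative (hypothetical) override that does chip-specific work
 * before handing back to the generic stop:
 *
 *	static void my_chip_bmdma_stop(struct ata_queued_cmd *qc)
 *	{
 *		my_chip_flush_fifo(qc->ap);	(hypothetical helper)
 *		ata_bmdma_stop(qc);
 *	}
 */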

/**
 * ata_bmdma_status - Read PCI IDE BMDMA status
 * @ap: Port associated with this ATA transaction.
 *
 * Read and return BMDMA status register.
 *
 * May be used as the bmdma_status() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_status);


/**
 * ata_bmdma_port_start - Set port up for bmdma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized. Allocates space for PRD table.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
int ata_bmdma_port_start(struct ata_port *ap)
{
	if (ap->mwdma_mask || ap->udma_mask) {
		ap->bmdma_prd =
			dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
					    &ap->bmdma_prd_dma, GFP_KERNEL);
		if (!ap->bmdma_prd)
			return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start);

/**
 * ata_bmdma_port_start32 - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized. Enables 32bit PIO and allocates space for PRD
 * table.
 *
 * May be used as the port_start() entry in ata_port_operations for
 * devices that are capable of 32bit PIO.
 *
 * LOCKING:
 * Inherited from caller.
 */
int ata_bmdma_port_start32(struct ata_port *ap)
{
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
	return ata_bmdma_port_start(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);

#ifdef CONFIG_PCI

/**
 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
 * @pdev: PCI device
 *
 * Some PCI ATA devices report simplex mode but in fact can be told to
 * enter non-simplex mode. This implements the necessary logic to
 * perform the task on such devices. Calling it on other devices will
 * have -undefined- behaviour.
 */
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
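
/*
 * Hypothetical use from a probe routine, before the host is prepared:
 *
 *	if (ata_pci_bmdma_clear_simplex(pdev))
 *		dev_warn(&pdev->dev, "simplex mode not cleared\n");
 *
 * A driver that cannot clear simplex typically leaves ATA_HOST_SIMPLEX
 * alone and lets the core serialize DMA between the two ports.
 */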
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) host->ports[i]->mwdma_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) host->ports[i]->udma_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) * @host: target ATA host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) * Acquire PCI BMDMA resources and initialize @host accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) * Inherited from calling layer (may sleep).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) void ata_pci_bmdma_init(struct ata_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) struct device *gdev = host->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) struct pci_dev *pdev = to_pci_dev(gdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) /* No BAR4 allocation: No DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) if (pci_resource_start(pdev, 4) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) ata_bmdma_nodma(host, "BAR4 is zero");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) * Some controllers require BMDMA region to be initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) * even if DMA is not in use to clear IRQ status via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) * ->sff_irq_clear method. Try to initialize bmdma_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) * regardless of dma masks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) ata_bmdma_nodma(host, "failed to set dma mask");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) /* request and iomap DMA region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) ata_bmdma_nodma(host, "failed to request/iomap BAR4");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) host->iomap = pcim_iomap_table(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) struct ata_port *ap = host->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) void __iomem *bmdma = host->iomap[4] + 8 * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) if (ata_port_is_dummy(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) ap->ioaddr.bmdma_addr = bmdma;
		if (!(ap->flags & ATA_FLAG_IGN_SIMPLEX) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
			(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);

/**
 * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
 * @pdev: target PCI device
 * @ppi: array of port_info, must be enough for two ports
 * @r_host: out argument for the initialized ATA host
 *
 * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
 * resources and initialize it accordingly in one go.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
			       const struct ata_port_info * const * ppi,
			       struct ata_host **r_host)
{
	int rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
	if (rc)
		return rc;

	ata_pci_bmdma_init(*r_host);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
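
/*
 * Example (illustrative sketch, not taken from an in-tree driver): a
 * minimal BMDMA probe built around ata_pci_bmdma_prepare_host().
 * "my_port_info" and "my_sht" are hypothetical driver-provided
 * objects; the remaining calls are the regular libata/PCI API.
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
 *		if (rc)
 *			return rc;
 *
 *		pci_set_master(pdev);
 *		return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
 *					 IRQF_SHARED, &my_sht);
 *	}
 */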

/**
 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
 * @pdev: Controller to be initialized
 * @ppi: array of port_info, must be enough for two ports
 * @sht: scsi_host_template to use when registering the host
 * @host_priv: host private_data
 * @hflags: host flags
 *
 * This function is similar to ata_pci_sff_init_one() but also
 * takes care of BMDMA initialization.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative errno-based value on error.
 */
int ata_pci_bmdma_init_one(struct pci_dev *pdev,
			   const struct ata_port_info * const * ppi,
			   struct scsi_host_template *sht, void *host_priv,
			   int hflags)
{
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
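
/*
 * Example (illustrative sketch): for controllers with no special probe
 * requirements, the helper above lets the whole probe collapse to a
 * single call.  "my_port_info" and "my_sht" are again hypothetical
 * driver-provided objects.
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *
 *		return ata_pci_bmdma_init_one(pdev, ppi, &my_sht, NULL, 0);
 *	}
 */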

#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */

/**
 * ata_sff_port_init - Initialize SFF/BMDMA ATA port
 * @ap: Port to initialize
 *
 * Called on port allocation to initialize SFF/BMDMA specific
 * fields.
 *
 * LOCKING:
 * None.
 */
void ata_sff_port_init(struct ata_port *ap)
{
	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
	ap->ctl = ATA_DEVCTL_OBS;
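	/*
	 * Start from an invalid value so that the very first taskfile
	 * load also writes the device control register.
	 */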
	ap->last_ctl = 0xFF;
}

int __init ata_sff_init(void)
{
	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
	if (!ata_sff_wq)
		return -ENOMEM;

	return 0;
}

void ata_sff_exit(void)
{
	destroy_workqueue(ata_sff_wq);
}