// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>

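/**
 * generic_ide_suspend - suspend an IDE device
 * @dev: device to suspend
 * @mesg: PM event message
 *
 * Issues an ATA_PRIV_PM_SUSPEND request to the drive's queue and waits
 * for it to complete.  On ports with ACPI support, _GTM is evaluated
 * once per port before suspending and _PS3 once after both devices on
 * the port have been suspended.  Returns 0 on success or -EIO if the
 * PM request failed.
 */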
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	ide_req(rq)->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}

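/*
 * Execute a power management request.  If the queue is already dying,
 * complete the request quietly with -ENXIO instead of issuing it;
 * otherwise run it at the head of the queue and return -EIO on failure.
 */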
static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		blk_mq_end_request(rq, BLK_STS_OK);
		return -ENXIO;
	}
	blk_execute_rq(q, NULL, rq, true);

	return scsi_req(rq)->result ? -EIO : 0;
}

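/**
 * generic_ide_resume - resume an IDE device
 * @dev: device to resume
 *
 * Restarts the stopped hardware queues, restores ACPI state and timings
 * where available (_PS0 and _STM once per port, plus this drive's ACPI
 * taskfiles), then issues an ATA_PRIV_PM_RESUME request at the head of
 * the queue.  On success the bound ide_driver's ->resume() method is
 * called, if it has one.
 */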
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	blk_mq_start_stopped_hw_queues(drive->queue, true);

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
	ide_req(rq)->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}

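/**
 * ide_complete_power_step - advance the PM state machine by one step
 * @drive: target drive
 * @rq: power management request
 *
 * Moves the ide_pm_state attached to @rq to the next suspend or resume
 * step.  Only disks go through the multi-step sequence; for other media
 * the step is left untouched here.
 */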
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
		drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}

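/**
 * ide_start_power_step - issue the command for the current PM step
 * @drive: target drive
 * @rq: power management request
 *
 * Builds and issues the ATA taskfile matching the current step (cache
 * flush, standby, idle-immediate) or performs the host-side work of
 * restoring PIO/DMA settings.  Steps that do not apply to the device
 * are skipped by advancing the state machine directly.
 */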
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}

/**
 * ide_complete_pm_rq - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = ide_req(rq)->special;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
		(ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
		blk_mq_stop_hw_queues(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;

	drive->hwif->rq = NULL;

	blk_mq_end_request(rq, BLK_STS_OK);
}

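/**
 * ide_check_pm_state - handle special PM requests before normal processing
 * @drive: target drive
 * @rq: request being started
 *
 * Marks the drive as blocked when a suspend sequence starts.  When a
 * resume sequence starts, waits (with long timeouts) for the port to
 * become non-busy, reselects the device and restarts the hardware
 * queues.
 */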
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special;

	if (blk_rq_is_private(rq) &&
	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_rq_is_private(rq) &&
	         ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
	         pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		blk_mq_start_hw_queues(q);
	}
}