// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aic94xx Task Management Functions
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

/* ---------- Internal enqueue ---------- */

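/*
 * Arm the ascb's ULDD timer with the given timeout handler, set the
 * done-list completion callback, and post the single ascb to the
 * sequencer.  If posting fails, the timer is deleted and the error
 * is returned to the caller.
 */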
static int asd_enqueue_internal(struct asd_ascb *ascb,
				void (*tasklet_complete)(struct asd_ascb *,
							 struct done_list_struct *),
				void (*timed_out)(struct timer_list *t))
{
	int res;

	ascb->tasklet_complete = tasklet_complete;
	ascb->uldd_timer = 1;

	ascb->timer.function = timed_out;
	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;

	add_timer(&ascb->timer);

	res = asd_post_ascb_list(ascb->ha, ascb, 1);
	if (unlikely(res))
		del_timer(&ascb->timer);
	return res;
}

/* ---------- CLEAR NEXUS ---------- */

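/*
 * Completion status handed from the submitting thread to the tasklet
 * completion and timeout handlers through ascb->uldd_task.
 */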
struct tasklet_completion_status {
	int	dl_opcode;
	int	tmf_state;
	u8	tag_valid:1;
	__be16	tag;
};

#define DECLARE_TCS(tcs) \
	struct tasklet_completion_status tcs = { \
		.dl_opcode = 0, \
		.tmf_state = 0, \
		.tag_valid = 0, \
		.tag = 0, \
	}


static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
					     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs = ascb->uldd_task;
	ASD_DPRINTK("%s: here\n", __func__);
	if (!del_timer(&ascb->timer)) {
		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
		return;
	}
	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
	tcs->dl_opcode = dl->opcode;
	complete(ascb->completion);
	asd_ascb_free(ascb);
}

static void asd_clear_nexus_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("%s: here\n", __func__);
	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}

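/*
 * CLEAR_NEXUS_PRE/POST open-code the common body of the
 * asd_clear_nexus_*() functions: allocate an ascb, point its
 * uldd_task at an on-stack completion status, fill in the CLEAR NEXUS
 * SCB header, then (in POST) post it, wait for completion and map a
 * TC_NO_ERROR done-list opcode to TMF_RESP_FUNC_COMPLETE.
 */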
#define CLEAR_NEXUS_PRE \
	struct asd_ascb *ascb; \
	struct scb *scb; \
	int res; \
	DECLARE_COMPLETION_ONSTACK(completion); \
	DECLARE_TCS(tcs); \
	\
	ASD_DPRINTK("%s: PRE\n", __func__); \
	res = 1; \
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
	if (!ascb) \
		return -ENOMEM; \
	\
	ascb->completion = &completion; \
	ascb->uldd_task = &tcs; \
	scb = ascb->scb; \
	scb->header.opcode = CLEAR_NEXUS

#define CLEAR_NEXUS_POST \
	ASD_DPRINTK("%s: POST\n", __func__); \
	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
				   asd_clear_nexus_timedout); \
	if (res) \
		goto out_err; \
	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
	wait_for_completion(&completion); \
	res = tcs.dl_opcode; \
	if (res == TC_NO_ERROR) \
		res = TMF_RESP_FUNC_COMPLETE; \
	return res; \
out_err: \
	asd_ascb_free(ascb); \
	return res

int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_ADAPTER;
	CLEAR_NEXUS_POST;
}

int asd_clear_nexus_port(struct asd_sas_port *port)
{
	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_PORT;
	scb->clear_nexus.conn_mask = port->phy_mask;
	CLEAR_NEXUS_POST;
}

enum clear_nexus_phase {
	NEXUS_PHASE_PRE,
	NEXUS_PHASE_POST,
	NEXUS_PHASE_RESUME,
};

static int asd_clear_nexus_I_T(struct domain_device *dev,
			       enum clear_nexus_phase phase)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T;
	switch (phase) {
	case NEXUS_PHASE_PRE:
		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
		break;
	case NEXUS_PHASE_POST:
		scb->clear_nexus.flags = SEND_Q | NOTINQ;
		break;
	case NEXUS_PHASE_RESUME:
		scb->clear_nexus.flags = RESUME_TX;
	}
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

int asd_I_T_nexus_reset(struct domain_device *dev)
{
	int res, tmp_res, i;
	struct sas_phy *phy = sas_get_local_phy(dev);
	/* Standard mandates link reset for ATA (type 0) and
	 * hard reset for SSP (type 1) */
	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
	/* send a hard reset */
	ASD_DPRINTK("sending %s reset to %s\n",
		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
	res = sas_phy_reset(phy, reset_type);
	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
		/* wait for the maximum settle time */
		msleep(500);
		/* clear all outstanding commands (keep nexus suspended) */
		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
	}
	for (i = 0; i < 3; i++) {
		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
		if (tmp_res == TC_RESUME)
			goto out;
		msleep(500);
	}

	/* This is a bit of a problem: the sequencer is still suspended
	 * and is refusing to resume. Hope it will resume on a bigger hammer
	 * or the disk is lost */
	dev_printk(KERN_ERR, &phy->dev,
		   "Failed to resume nexus after reset 0x%x\n", tmp_res);

	res = TMF_RESP_FUNC_FAILED;
out:
	sas_put_local_phy(phy);
	return res;
}

static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T_L;
	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_tag(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TAG;
	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
	scb->clear_nexus.ssp_task.tag = tascb->tag;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_index(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
	CLEAR_NEXUS_POST;
}

/* ---------- TMFs ---------- */

static void asd_tmf_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("tmf timed out\n");
	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}

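/*
 * Pull the TMF response out of the empty data buffer (EDB) that the
 * done-list status block points at: record the SSP frame tag, then
 * return the status from the SSP Response IU (or the response data
 * code when response data is present).
 */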
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
				    struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	unsigned long flags;
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8 len_lsb;
		u8 flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

	int edb_id = ((resp_sb->flags & 0x70) >> 4) - 1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	struct ssp_frame_hdr *fh;
	struct ssp_response_iu *ru;
	int res = TMF_RESP_FUNC_FAILED;

	ASD_DPRINTK("tmf resp tasklet\n");

	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return res;
	}

	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	ascb->tag = *(__be16 *)(edb->vaddr + 4);
	fh = edb->vaddr + 16;
	ru = edb->vaddr + 16 + sizeof(*fh);
	res = ru->status;
	if (ru->datapres == 1)	/* Response data present */
		res = ru->resp_data[3];
#if 0
	ascb->tag = fh->tag;
#endif
	ascb->tag_valid = 1;

	asd_invalidate_edb(escb, edb_id);
	return res;
}

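/*
 * Done-list completion for TMF SCBs.  If the timer could not be
 * deleted, the timeout handler has won the race and will complete the
 * waiter itself; otherwise record the done-list opcode and, for an
 * SSP RESPONSE frame, the decoded TMF state and tag, then wake the
 * submitter.
 */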
static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs;

	if (!del_timer(&ascb->timer))
		return;

	tcs = ascb->uldd_task;
	ASD_DPRINTK("tmf tasklet complete\n");

	tcs->dl_opcode = dl->opcode;

	if (dl->opcode == TC_SSP_RESP) {
		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
		tcs->tag_valid = ascb->tag_valid;
		tcs->tag = ascb->tag;
	}

	complete(ascb->completion);
	asd_ascb_free(ascb);
}

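/*
 * The ABORT TASK TMF completed but the task itself has not: clear the
 * nexus (by tag if one was reported, otherwise by transaction context
 * index) and then wait a bounded time for either the nexus to be
 * cleared or the task to be marked done.
 */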
static int asd_clear_nexus(struct sas_task *task)
{
	int res = TMF_RESP_FUNC_FAILED;
	int leftover;
	struct asd_ascb *tascb = task->lldd_task;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;

	tascb->completion = &completion;

	ASD_DPRINTK("task not done, clearing nexus\n");
	if (tascb->tag_valid)
		res = asd_clear_nexus_tag(task);
	else
		res = asd_clear_nexus_index(task);
	leftover = wait_for_completion_timeout(&completion,
					       AIC94XX_SCB_TIMEOUT);
	tascb->completion = NULL;
	ASD_DPRINTK("came back from clear nexus\n");
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (leftover < 1)
		res = TMF_RESP_FUNC_FAILED;
	if (task->task_state_flags & SAS_TASK_STATE_DONE)
		res = TMF_RESP_FUNC_COMPLETE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return res;
}

/**
 * asd_abort_task -- ABORT TASK TMF
 * @task: the task to be aborted
 *
 * Before calling ABORT TASK the task state flags should be ORed with
 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
 *
 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
 * Returns: SAS TMF responses (see sas_task.h),
 *          -ENOMEM,
 *          -SAS_QUEUE_FULL.
 *
 * When ABORT TASK returns, the caller of ABORT TASK checks first the
 * task->task_state_flags, and then the return value of ABORT TASK.
 *
 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
 * task was completed successfully prior to it being aborted.  The
 * caller of ABORT TASK has responsibility to call task->task_done()
 * xor free the task, depending on their framework.  The return code
 * is TMF_RESP_FUNC_FAILED in this case.
 *
 * Else the SAS_TASK_STATE_DONE bit is not set,
 *	if the return code is TMF_RESP_FUNC_COMPLETE, then
 *		the task was aborted successfully.  The caller of
 *		ABORT TASK has responsibility to call task->task_done()
 *		to finish the task, xor free the task depending on their
 *		framework.
 *	else
 *		the ABORT TASK returned some kind of error.  The task
 *		was _not_ cancelled.  Nothing can be assumed.
 *		The caller of ABORT TASK may wish to retry.
 */
int asd_abort_task(struct sas_task *task)
{
	struct asd_ascb *tascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = tascb->ha;
	int res = 1;
	unsigned long flags;
	struct asd_ascb *ascb = NULL;
	struct scb *scb;
	int leftover;
	DECLARE_TCS(tcs);
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_COMPLETION_ONSTACK(tascb_completion);

	tascb->completion = &tascb_completion;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->uldd_task = &tcs;
	ascb->completion = &completion;
	scb = ascb->scb;
	scb->header.opcode = SCB_ABORT_TASK;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
		break;
	case SAS_PROTOCOL_SSP:
		scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
		break;
	case SAS_PROTOCOL_SMP:
		break;
	default:
		break;
	}

	if (task->task_proto == SAS_PROTOCOL_SSP) {
		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
		       task->dev->port->ha->hashed_sas_addr,
		       HASHED_SAS_ADDR_SIZE);
		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
	}

	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->abort_task.conn_handle = cpu_to_le16(
		(u16)(unsigned long)task->dev->lldd_dev);
	scb->abort_task.retry_count = 1;
	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_free;
	wait_for_completion(&completion);
	ASD_DPRINTK("tmf came back\n");

	tascb->tag = tcs.tag;
	tascb->tag_valid = tcs.tag_valid;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (tcs.dl_opcode == TC_SSP_RESP) {
		/* The task to be aborted has been sent to the device.
		 * We got a Response IU for the ABORT TASK TMF. */
		if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
			res = asd_clear_nexus(task);
		else
			res = tcs.tmf_state;
	} else if (tcs.dl_opcode == TC_NO_ERROR &&
		   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
		/* timeout */
		res = TMF_RESP_FUNC_FAILED;
	} else {
		/* In the following we assume that the managing layer
		 * will _never_ make a mistake, when issuing ABORT
		 * TASK.
		 */
		switch (tcs.dl_opcode) {
		default:
			res = asd_clear_nexus(task);
			fallthrough;
		case TC_NO_ERROR:
			break;
			/* The task hasn't been sent to the device xor
			 * we never got a (sane) Response IU for the
			 * ABORT TASK TMF.
			 */
		case TF_NAK_RECV:
			res = TMF_RESP_INVALID_FRAME;
			break;
		case TF_TMF_TASK_DONE:	/* done but not reported yet */
			res = TMF_RESP_FUNC_FAILED;
			leftover =
				wait_for_completion_timeout(&tascb_completion,
							    AIC94XX_SCB_TIMEOUT);
			spin_lock_irqsave(&task->task_state_lock, flags);
			if (leftover < 1)
				res = TMF_RESP_FUNC_FAILED;
			if (task->task_state_flags & SAS_TASK_STATE_DONE)
				res = TMF_RESP_FUNC_COMPLETE;
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			break;
		case TF_TMF_NO_TAG:
		case TF_TMF_TAG_FREE: /* the tag is in the free list */
		case TF_TMF_NO_CONN_HANDLE: /* no such device */
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
			res = TMF_RESP_FUNC_ESUPP;
			break;
		}
	}
out_done:
	tascb->completion = NULL;
	if (res == TMF_RESP_FUNC_COMPLETE) {
		task->lldd_task = NULL;
		mb();
		asd_ascb_free(tascb);
	}
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;

out_free:
	asd_ascb_free(ascb);
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;
}

/**
 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
 * @dev: pointer to struct domain_device of interest
 * @lun: pointer to u8[8] which is the LUN
 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
 * @index: the transaction context of the task to be queried if QT TMF
 *
 * This function is used to send ABORT TASK SET, CLEAR ACA,
 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
 *
 * No SCBs should be queued to the I_T_L nexus when this SCB is
 * pending.
 *
 * Returns: TMF response code (see sas_task.h or the SAS spec)
 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
				int tmf, int index)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	int res = 1;
	struct scb *scb;
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_TCS(tcs);

	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->completion = &completion;
	ascb->uldd_task = &tcs;
	scb = ascb->scb;

	if (tmf == TMF_QUERY_TASK)
		scb->header.opcode = QUERY_SSP_TASK;
	else
		scb->header.opcode = INITIATE_SSP_TMF;

	scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
	/* SSP frame header */
	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
	/* SSP Task IU */
	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
	scb->ssp_tmf.ssp_task.tmf = tmf;

	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
	scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
					       dev->lldd_dev);
	scb->ssp_tmf.retry_count = 1;
	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
	if (tmf == TMF_QUERY_TASK)
		scb->ssp_tmf.index = cpu_to_le16(index);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_err;
	wait_for_completion(&completion);

	switch (tcs.dl_opcode) {
	case TC_NO_ERROR:
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_NAK_RECV:
		res = TMF_RESP_INVALID_FRAME;
		break;
	case TF_TMF_TASK_DONE:
		res = TMF_RESP_FUNC_FAILED;
		break;
	case TF_TMF_NO_TAG:
	case TF_TMF_TAG_FREE: /* the tag is in the free list */
	case TF_TMF_NO_CONN_HANDLE: /* no such device */
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
		res = TMF_RESP_FUNC_ESUPP;
		break;
	default:
		/* Allow TMF response codes to propagate upwards */
		res = tcs.dl_opcode;
		break;
	}
	return res;
out_err:
	asd_ascb_free(ascb);
	return res;
}

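/*
 * The wrappers below issue the corresponding TMF through
 * asd_initiate_ssp_tmf() and, if it completes successfully, clear the
 * I_T_L nexus for the same LUN.
 */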
int asd_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_clear_aca(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_lu_reset(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

/**
 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
 * @task: pointer to sas_task struct of interest
 *
 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
 *
 * Normally the management layer sets the task to aborted state,
 * and then calls query task and then abort task.
 */
int asd_query_task(struct sas_task *task)
{
	struct asd_ascb *ascb = task->lldd_task;
	int index;

	if (ascb) {
		index = ascb->tc_index;
		return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
					    TMF_QUERY_TASK, index);
	}
	return TMF_RESP_FUNC_COMPLETE;
}