^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * This file is provided under a dual BSD/GPLv2 license. When using or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * redistributing this file, you may do so under either license.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * GPL LICENSE SUMMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * it under the terms of version 2 of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * You should have received a copy of the GNU General Public License
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * along with this program; if not, write to the Free Software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * The full GNU General Public License is included in this distribution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * in the file called LICENSE.GPL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * BSD LICENSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * * Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * * Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * notice, this list of conditions and the following disclaimer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * the documentation and/or other materials provided with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * * Neither the name of Intel Corporation nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * contributors may be used to endorse or promote products derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * from this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include "isci.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include "task.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #include "request.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #include "scu_completion_codes.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #include "scu_event_codes.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #include "sas.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #undef C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define C(a) (#a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) const char *req_state_name(enum sci_base_request_states state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static const char * const strings[] = REQUEST_STATES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return strings[state];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #undef C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) if (idx == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) return &ireq->tc->sgl_pair_ab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) else if (idx == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return &ireq->tc->sgl_pair_cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) else if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) return &ireq->sg_table[idx - 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) struct isci_request *ireq, u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (idx == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) offset = (void *) &ireq->tc->sgl_pair_ab -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) (void *) &ihost->task_context_table[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) return ihost->tc_dma + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) } else if (idx == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) offset = (void *) &ireq->tc->sgl_pair_cd -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) (void *) &ihost->task_context_table[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) return ihost->tc_dma + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) e->length = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) e->address_upper = upper_32_bits(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) e->address_lower = lower_32_bits(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) e->address_modifier = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) static void sci_request_build_sgl(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) struct isci_host *ihost = ireq->isci_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct scatterlist *sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) u32 sg_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) struct scu_sgl_element_pair *scu_sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) struct scu_sgl_element_pair *prev_sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) if (task->num_scatter > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) sg = task->scatter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) while (sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) scu_sg = to_sgl_element_pair(ireq, sg_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) init_sgl_element(&scu_sg->A, sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) if (sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) init_sgl_element(&scu_sg->B, sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) memset(&scu_sg->B, 0, sizeof(scu_sg->B));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) if (prev_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) dma_addr = to_sgl_element_pair_dma(ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) sg_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) prev_sg->next_pair_upper =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) upper_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) prev_sg->next_pair_lower =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) lower_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) prev_sg = scu_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) sg_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) } else { /* handle when no sg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) scu_sg = to_sgl_element_pair(ireq, sg_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) dma_addr = dma_map_single(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) task->scatter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) task->total_xfer_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) task->data_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) ireq->zero_scatter_daddr = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) scu_sg->A.length = task->total_xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) scu_sg->A.address_upper = upper_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) scu_sg->A.address_lower = lower_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) if (scu_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) scu_sg->next_pair_upper = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) scu_sg->next_pair_lower = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) struct ssp_cmd_iu *cmd_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) cmd_iu = &ireq->ssp.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) cmd_iu->add_cdb_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) cmd_iu->_r_a = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) cmd_iu->_r_b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) cmd_iu->en_fburst = 0; /* unsupported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) cmd_iu->task_prio = task->ssp_task.task_prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) cmd_iu->task_attr = task->ssp_task.task_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) cmd_iu->_r_c = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct ssp_task_iu *task_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) task_iu = &ireq->ssp.tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) memset(task_iu, 0, sizeof(struct ssp_task_iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) task_iu->task_func = isci_tmf->tmf_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) task_iu->task_tag =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) (test_bit(IREQ_TMF, &ireq->flags)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) isci_tmf->io_tag :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) SCI_CONTROLLER_INVALID_IO_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * This method is will fill in the SCU Task Context for any type of SSP request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * @sci_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * @task_context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) static void scu_ssp_request_construct_task_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct scu_task_context *task_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) struct isci_remote_device *idev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) struct isci_port *iport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) idev = ireq->target_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) iport = idev->owning_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /* Fill in the TC with its required data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) task_context->abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) task_context->priority = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) task_context->initiator_request = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) task_context->connection_rate = idev->connection_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) task_context->protocol_engine_index = ISCI_PEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) task_context->logical_port_index = iport->physical_port_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) task_context->valid = SCU_TASK_CONTEXT_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) task_context->context_type = SCU_TASK_CONTEXT_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) task_context->remote_node_index = idev->rnc.remote_node_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) task_context->command_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) task_context->link_layer_control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) task_context->do_not_dma_ssp_good_response = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) task_context->strict_ordering = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) task_context->control_frame = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) task_context->timeout_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) task_context->block_guard_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) task_context->address_modifier = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) /* task_context->type.ssp.tag = ireq->io_tag; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) task_context->task_phase = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) (iport->physical_port_index <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) ISCI_TAG_TCI(ireq->io_tag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * Copy the physical address for the command buffer to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * SCU Task Context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) task_context->command_iu_upper = upper_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) task_context->command_iu_lower = lower_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * Copy the physical address for the response buffer to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * SCU Task Context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) task_context->response_iu_upper = upper_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) task_context->response_iu_lower = lower_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) static u8 scu_bg_blk_size(struct scsi_device *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) switch (sdp->sector_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) case 512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) case 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) case 4096:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) return 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) return 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) static u32 scu_dif_bytes(u32 len, u32 sector_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) return (len >> ilog2(sector_size)) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) struct scu_task_context *tc = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) u8 blk_sz = scu_bg_blk_size(scmd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) tc->block_guard_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) tc->blk_prot_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) tc->blk_sz = blk_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) /* DIF write insert */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) tc->blk_prot_func = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) scmd->device->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) /* always init to 0, used by hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) tc->interm_crc_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) tc->init_crc_seed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) tc->app_tag_verify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) tc->app_tag_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) tc->ref_tag_seed_verify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) /* always init to same as bg_blk_sz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) tc->UD_bytes_immed_val = scmd->device->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) tc->reserved_DC_0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) /* always init to 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) tc->DIF_bytes_immed_val = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) tc->reserved_DC_1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) tc->bgc_blk_sz = scmd->device->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) tc->reserved_E0_0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) tc->app_tag_gen_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) /** setup block guard control **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) tc->bgctl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) /* DIF write insert */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) tc->bgctl_f.op = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) tc->app_tag_verify_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) /* must init to 0 for hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) tc->blk_guard_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) tc->reserved_E8_0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) else if (type & SCSI_PROT_DIF_TYPE3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) tc->ref_tag_seed_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) struct scu_task_context *tc = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) u8 blk_sz = scu_bg_blk_size(scmd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) tc->block_guard_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) tc->blk_prot_en = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) tc->blk_sz = blk_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) /* DIF read strip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) tc->blk_prot_func = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) scmd->device->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) /* always init to 0, used by hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) tc->interm_crc_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) tc->init_crc_seed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) tc->app_tag_verify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) tc->app_tag_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) else if (type & SCSI_PROT_DIF_TYPE3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) tc->ref_tag_seed_verify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) /* always init to same as bg_blk_sz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) tc->UD_bytes_immed_val = scmd->device->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) tc->reserved_DC_0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) /* always init to 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) tc->DIF_bytes_immed_val = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) tc->reserved_DC_1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) tc->bgc_blk_sz = scmd->device->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) tc->reserved_E0_0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) tc->app_tag_gen_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) /** setup block guard control **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) tc->bgctl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) /* DIF read strip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) tc->bgctl_f.crc_verify = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) tc->bgctl_f.op = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) tc->bgctl_f.ref_tag_chk = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) tc->bgctl_f.app_f_detect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) } else if (type & SCSI_PROT_DIF_TYPE3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) tc->bgctl_f.app_ref_f_detect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) tc->app_tag_verify_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) /* must init to 0 for hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) tc->blk_guard_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) tc->reserved_E8_0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) tc->ref_tag_seed_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) * This method is will fill in the SCU Task Context for a SSP IO request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) * @sci_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) struct scu_task_context *task_context = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) struct scsi_cmnd *scmd = sas_task->uldd_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) u8 prot_type = scsi_get_prot_type(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) u8 prot_op = scsi_get_prot_op(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) scu_ssp_request_construct_task_context(ireq, task_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) task_context->ssp_command_iu_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) sizeof(struct ssp_cmd_iu) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) task_context->type.ssp.frame_type = SSP_COMMAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) switch (dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) case DMA_FROM_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) case DMA_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) task_context->task_type = SCU_TASK_TYPE_IOREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) case DMA_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) task_context->task_type = SCU_TASK_TYPE_IOWRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) task_context->transfer_length_bytes = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (task_context->transfer_length_bytes > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) sci_request_build_sgl(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (prot_type != SCSI_PROT_DIF_TYPE0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (prot_op == SCSI_PROT_READ_STRIP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) else if (prot_op == SCSI_PROT_WRITE_INSERT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * This method will fill in the SCU Task Context for a SSP Task request. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * following important settings are utilized: -# priority ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) * ahead of other task destined for the same Remote Node. -# task_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) * (i.e. non-raw frame) is being utilized to perform task management. -#
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * control_frame == 1. This ensures that the proper endianess is set so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * that the bytes are transmitted in the right order for a task frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * @sci_req: This parameter specifies the task request object being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * constructed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) struct scu_task_context *task_context = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) scu_ssp_request_construct_task_context(ireq, task_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) task_context->control_frame = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) task_context->priority = SCU_TASK_PRIORITY_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) task_context->transfer_length_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) task_context->type.ssp.frame_type = SSP_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) task_context->ssp_command_iu_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) sizeof(struct ssp_task_iu) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * This method is will fill in the SCU Task Context for any type of SATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * request. This is called from the various SATA constructors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * @sci_req: The general IO request object which is to be used in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * constructing the SCU task context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * @task_context: The buffer pointer for the SCU task context which is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) * constructed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) * The general io request construction is complete. The buffer assignment for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) * the command buffer is complete. none Revisit task context construction to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) * determine what is common for SSP/SMP/STP task context structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) static void scu_sata_request_construct_task_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) struct scu_task_context *task_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) struct isci_remote_device *idev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) struct isci_port *iport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) idev = ireq->target_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) iport = idev->owning_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /* Fill in the TC with its required data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) task_context->abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) task_context->priority = SCU_TASK_PRIORITY_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) task_context->initiator_request = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) task_context->connection_rate = idev->connection_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) task_context->protocol_engine_index = ISCI_PEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) task_context->logical_port_index = iport->physical_port_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) task_context->valid = SCU_TASK_CONTEXT_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) task_context->context_type = SCU_TASK_CONTEXT_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) task_context->remote_node_index = idev->rnc.remote_node_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) task_context->command_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) task_context->link_layer_control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) task_context->do_not_dma_ssp_good_response = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) task_context->strict_ordering = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) task_context->control_frame = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) task_context->timeout_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) task_context->block_guard_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) task_context->address_modifier = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) task_context->task_phase = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) task_context->ssp_command_iu_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) /* Set the first word of the H2D REG FIS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) (iport->physical_port_index <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) ISCI_TAG_TCI(ireq->io_tag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) * Copy the physical address for the command buffer to the SCU Task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * Context. We must offset the command buffer by 4 bytes because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * first 4 bytes are transfered in the body of the TC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) dma_addr = sci_io_request_get_dma_addr(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) ((char *) &ireq->stp.cmd) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) task_context->command_iu_upper = upper_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) task_context->command_iu_lower = lower_32_bits(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) /* SATA Requests do not have a response buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) task_context->response_iu_upper = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) task_context->response_iu_lower = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) struct scu_task_context *task_context = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) scu_sata_request_construct_task_context(ireq, task_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) task_context->control_frame = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) task_context->priority = SCU_TASK_PRIORITY_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) task_context->type.stp.fis_type = FIS_REGH2D;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) bool copy_rx_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) struct isci_stp_request *stp_req = &ireq->stp.req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) scu_stp_raw_request_construct_task_context(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) stp_req->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) stp_req->sgl.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (copy_rx_frame) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) sci_request_build_sgl(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) stp_req->sgl.index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) /* The user does not want the data copied to the SGL buffer location */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) stp_req->sgl.index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * @sci_req: This parameter specifies the request to be constructed as an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) * optimized request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * @optimized_task_type: This parameter specifies whether the request is to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * value of 1 indicates NCQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) * This method will perform request construction common to all types of STP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) * returns an indication as to whether the construction was successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) static void sci_stp_optimized_request_construct(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) u8 optimized_task_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) struct scu_task_context *task_context = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /* Build the STP task context structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) scu_sata_request_construct_task_context(ireq, task_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) /* Copy over the SGL elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) sci_request_build_sgl(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) /* Copy over the number of bytes to be transfered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) task_context->transfer_length_bytes = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (dir == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) * The difference between the DMA IN and DMA OUT request task type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * values are consistent with the difference between FPDMA READ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * and FPDMA WRITE values. Add the supplied task type parameter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * to this difference to set the task type properly for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * DATA OUT (WRITE) case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) - SCU_TASK_TYPE_DMA_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) * For the DATA IN (READ) case, simply save the supplied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * optimized task type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) task_context->task_type = optimized_task_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) static void sci_atapi_construct(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) struct sas_task *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /* To simplify the implementation we take advantage of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * silicon's partial acceleration of atapi protocol (dma data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * transfers), so we promote all commands to dma protocol. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) h2d_fis->features |= ATAPI_PKT_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) scu_stp_raw_request_construct_task_context(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (task->data_dir == DMA_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) task->total_xfer_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /* clear the response so we can detect arrivial of an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * unsolicited h2d fis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) ireq->stp.rsp.fis_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) sci_io_request_construct_sata(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) bool copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct domain_device *dev = ireq->target_device->domain_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) /* check for management protocols */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (test_bit(IREQ_TMF, &ireq->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) struct isci_tmf *tmf = isci_request_access_tmf(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) dev_err(&ireq->owning_controller->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) "%s: Request 0x%p received un-handled SAT "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) "management protocol 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) __func__, ireq, tmf->tmf_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) return SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (!sas_protocol_ata(task->task_proto)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) dev_err(&ireq->owning_controller->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) "%s: Non-ATA protocol in SATA path: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) task->task_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) return SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /* ATAPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (dev->sata_dev.class == ATA_DEV_ATAPI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) task->ata_task.fis.command == ATA_CMD_PACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) sci_atapi_construct(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) /* non data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (task->data_dir == DMA_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) scu_stp_raw_request_construct_task_context(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) /* NCQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (task->ata_task.use_ncq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) sci_stp_optimized_request_construct(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) SCU_TASK_TYPE_FPDMAQ_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) len, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) /* DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (task->ata_task.dma_xfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) sci_stp_optimized_request_construct(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) SCU_TASK_TYPE_DMA_IN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) len, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) } else /* PIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) return sci_stp_pio_request_construct(ireq, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) ireq->protocol = SAS_PROTOCOL_SSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) scu_ssp_io_request_construct_task_context(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) task->data_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) task->total_xfer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) sci_io_request_build_ssp_command_iu(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) enum sci_status sci_task_request_construct_ssp(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /* Construct the SSP Task SCU Task Context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) scu_ssp_task_request_construct_task_context(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* Fill in the SSP Task IU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) sci_task_request_build_ssp_task_iu(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) enum sci_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) bool copy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) ireq->protocol = SAS_PROTOCOL_STP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) copy = (task->data_dir == DMA_NONE) ? false : true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) status = sci_io_request_construct_sata(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) task->total_xfer_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) task->data_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (status == SCI_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * sci_req_tx_bytes - bytes transferred when reply underruns request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * @ireq: request that was terminated early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) #define SCU_TASK_CONTEXT_SRAM 0x200000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static u32 sci_req_tx_bytes(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct isci_host *ihost = ireq->owning_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) u32 ret_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (readl(&ihost->smu_registers->address_modifier) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) void __iomem *scu_reg_base = ihost->scu_registers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * BAR1 is the scu_registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * 0x20002C = 0x200000 + 0x2c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * = start of task context SRAM + offset of (type.ssp.data_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * TCi is the io_tag of struct sci_request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) ret_val = readl(scu_reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return ret_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) enum sci_status sci_request_start(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) enum sci_base_request_states state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct scu_task_context *tc = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct isci_host *ihost = ireq->owning_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) state = ireq->sm.current_state_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (state != SCI_REQ_CONSTRUCTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) dev_warn(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) "%s: SCIC IO Request requested to start while in wrong "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) "state %d\n", __func__, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return SCI_FAILURE_INVALID_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) switch (tc->protocol_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) case SCU_TASK_CONTEXT_PROTOCOL_SMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) case SCU_TASK_CONTEXT_PROTOCOL_SSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /* SSP/SMP Frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) tc->type.ssp.tag = ireq->io_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) tc->type.ssp.target_port_transfer_tag = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) case SCU_TASK_CONTEXT_PROTOCOL_STP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /* STP/SATA Frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * tc->type.stp.ncq_tag = ireq->ncq_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) case SCU_TASK_CONTEXT_PROTOCOL_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* / @todo When do we set no protocol type? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /* This should never happen since we build the IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* Add to the post_context the io tag value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* Everything is good go ahead and change state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) sci_change_state(&ireq->sm, SCI_REQ_STARTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) sci_io_request_terminate(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) enum sci_base_request_states state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) state = ireq->sm.current_state_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) case SCI_REQ_CONSTRUCTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Set to make sure no HW terminate posting is done: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) case SCI_REQ_STARTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) case SCI_REQ_TASK_WAIT_TC_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) case SCI_REQ_SMP_WAIT_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) case SCI_REQ_SMP_WAIT_TC_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) case SCI_REQ_STP_UDMA_WAIT_D2H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) case SCI_REQ_STP_NON_DATA_WAIT_H2D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) case SCI_REQ_STP_NON_DATA_WAIT_D2H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) case SCI_REQ_STP_PIO_WAIT_H2D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) case SCI_REQ_STP_PIO_WAIT_FRAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) case SCI_REQ_STP_PIO_DATA_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) case SCI_REQ_STP_PIO_DATA_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) case SCI_REQ_ATAPI_WAIT_H2D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) case SCI_REQ_ATAPI_WAIT_D2H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) case SCI_REQ_ATAPI_WAIT_TC_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* Fall through and change state to ABORTING... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) case SCI_REQ_TASK_WAIT_TC_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /* The task frame was already confirmed to have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * sent by the SCU HW. Since the state machine is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * now only waiting for the task response itself,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * abort the request and complete it immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * and don't wait for the task response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) fallthrough; /* and handle like ABORTING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) case SCI_REQ_ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) set_bit(IREQ_PENDING_ABORT, &ireq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* If the request is only waiting on the remote device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * suspension, return SUCCESS so the caller will wait too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) case SCI_REQ_COMPLETED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) dev_warn(&ireq->owning_controller->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) "%s: SCIC IO Request requested to abort while in wrong "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) "state %d\n", __func__, ireq->sm.current_state_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return SCI_FAILURE_INVALID_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) enum sci_status sci_request_complete(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) enum sci_base_request_states state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct isci_host *ihost = ireq->owning_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) state = ireq->sm.current_state_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (WARN_ONCE(state != SCI_REQ_COMPLETED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) "isci: request completion from wrong state (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) req_state_name(state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return SCI_FAILURE_INVALID_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) sci_controller_release_frame(ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ireq->saved_rx_frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* XXX can we just stop the machine and remove the 'final' state? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) sci_change_state(&ireq->sm, SCI_REQ_FINAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) u32 event_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) enum sci_base_request_states state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct isci_host *ihost = ireq->owning_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) state = ireq->sm.current_state_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (state != SCI_REQ_STP_PIO_DATA_IN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) __func__, event_code, req_state_name(state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return SCI_FAILURE_INVALID_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) switch (scu_get_event_specifier(event_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* We are waiting for data and the SCU has R_ERR the data frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * Go back to waiting for the D2H Register FIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) dev_err(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) "%s: pio request unexpected event %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) __func__, event_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* TODO Should we fail the PIO request when we get an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * unexpected event?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * This function copies response data for requests returning response data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * instead of sense data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * @sci_req: This parameter specifies the request object for which to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * the response data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static void sci_io_request_copy_response(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) void *resp_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct ssp_response_iu *ssp_response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ssp_response = &ireq->ssp.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) resp_buf = &isci_tmf->resp.resp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) len = min_t(u32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) SSP_RESP_IU_MAX_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) be32_to_cpu(ssp_response->response_data_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) memcpy(resp_buf, ssp_response->resp_data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) request_started_state_tc_event(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct ssp_response_iu *resp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) u8 datapres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * to determine SDMA status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* There are times when the SCU hardware will return an early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * response because the io request specified more data than is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * returned by the target device (mode pages, inquiry data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * etc.). We must check the response stats to see if this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * truly a failed request or a good request that just got
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * completed early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct ssp_response_iu *resp = &ireq->ssp.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) sci_swab32_cpy(&ireq->ssp.rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) &ireq->ssp.rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) word_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (resp->status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) sci_swab32_cpy(&ireq->ssp.rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) &ireq->ssp.rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) word_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * guaranteed to be received before this completion status is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * posted?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) resp_iu = &ireq->ssp.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) datapres = resp_iu->datapres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (datapres == 1 || datapres == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* only stp device gets suspended. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (ireq->protocol == SAS_PROTOCOL_STP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) SCU_COMPLETION_TL_STATUS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) SCU_COMPLETION_TL_STATUS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* both stp/ssp device gets suspended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) SCU_COMPLETION_TL_STATUS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /* neither ssp nor stp gets suspended. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) SCU_COMPLETION_TL_STATUS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * TODO: This is probably wrong for ACK/NAK timeout conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* In all cases we will treat this as the completion of the IO req. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) request_aborting_state_tc_event(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /* Unless we get some strange error wait for the task abort to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * TODO: Should there be a state change for this completion?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* Currently, the decision is to simply allow the task request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * to timeout if the task IU wasn't received successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * There is a potential for receiving multiple task responses if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * we decide to send the task IU again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) dev_warn(&ireq->owning_controller->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) "%s: TaskRequest:0x%p CompletionCode:%x - "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) "ACK/NAK timeout\n", __func__, ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * All other completion status cause the IO to be complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * If a NAK was received, then it is up to the user to retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) smp_request_await_response_tc_event(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /* In the AWAIT RESPONSE state, any TC completion is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * unexpected. but if the TC has success status, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * complete the IO anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /* These status has been seen in a specific LSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * expander, which sometimes is not able to send smp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * response within 2 ms. This causes our hardware break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * the connection and set TC completion with one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * these SMP_XXX_XX_ERR status. For these type of error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * we ask ihost user to retry the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /* All other completion status cause the IO to be complete. If a NAK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * was received, then it is up to the user to retry the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) smp_request_await_tc_event(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* All other completion status cause the IO to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * complete. If a NAK was received, then it is up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * the user to retry the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct scu_sgl_element *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct scu_sgl_element_pair *sgl_pair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct isci_request *ireq = to_ireq(stp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (!sgl_pair)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) sgl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (sgl_pair->B.address_lower == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) sgl_pair->B.address_upper == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) sgl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) sgl = &sgl_pair->B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (sgl_pair->next_pair_lower == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) sgl_pair->next_pair_upper == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) sgl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) pio_sgl->index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) sgl = &sgl_pair->A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* All other completion status cause the IO to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * complete. If a NAK was received, then it is up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * the user to retry the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
/* transmit a DATA_FIS from (current sgl + offset) for the input
 * parameter length. The current SGL and offset are already stored in the IO
 * request.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) u32 length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) struct isci_stp_request *stp_req = &ireq->stp.req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct scu_task_context *task_context = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct scu_sgl_element_pair *sgl_pair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct scu_sgl_element *current_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /* Recycle the TC and reconstruct it for sending out DATA FIS containing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * for the data from current_sgl+offset for the input length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) current_sgl = &sgl_pair->A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) current_sgl = &sgl_pair->B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /* update the TC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) task_context->command_iu_upper = current_sgl->address_upper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) task_context->command_iu_lower = current_sgl->address_lower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) task_context->transfer_length_bytes = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) task_context->type.stp.fis_type = FIS_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /* send the new TC out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return sci_controller_continue_io(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct isci_stp_request *stp_req = &ireq->stp.req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) struct scu_sgl_element_pair *sgl_pair;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct scu_sgl_element *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) u32 len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) offset = stp_req->sgl.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) sgl = &sgl_pair->A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) len = sgl_pair->A.length - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) sgl = &sgl_pair->B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) len = sgl_pair->B.length - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (stp_req->pio_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (stp_req->pio_len >= len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (status != SCI_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) stp_req->pio_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* update the current sgl, offset and save for future */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) sgl = pio_sgl_next(stp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) } else if (stp_req->pio_len < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /* Sgl offset will be adjusted and saved for future */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) offset += stp_req->pio_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) sgl->address_lower += stp_req->pio_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) stp_req->pio_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) stp_req->sgl.offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
/**
 * sci_stp_request_pio_data_in_copy_data_buffer() - copy received PIO data
 * into the IO request's scatter-gather buffers
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request
 * SGL specified data region.
 *
 * Return: an enum sci_status indicating the result of the copy.
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
					     u8 *data_buf, u32 len)
{
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	ireq = to_ireq(stp_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		/* walk the scatterlist, copying up to one entry per pass */
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			/* NOTE(review): bounds each chunk by sg_dma_len() even
			 * though this is a CPU copy; assumes the DMA length
			 * matches the mapped extent here — confirm.
			 */
			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr);
			total_len -= copy_len;
			src_addr += copy_len;
			/* NOTE(review): no NULL check on sg_next(); relies on
			 * the scatterlist covering at least @len bytes.
			 */
			sg = sg_next(sg);
		}
	} else {
		/* no scatterlist: task->scatter is used directly as a flat
		 * destination buffer, which must be large enough
		 */
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
/**
 * sci_stp_request_pio_data_in_copy_data() - copy a received PIO data frame
 * into the IO request data region
 * @stp_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the IO request data region.
 *
 * Return: an enum sci_status indicating the result of the copy.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static enum sci_status sci_stp_request_pio_data_in_copy_data(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct isci_stp_request *stp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) u8 *data_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) enum sci_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * If there is less than 1K remaining in the transfer request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * copy just the data for the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) status = sci_stp_request_pio_data_in_copy_data_buffer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) stp_req, data_buffer, stp_req->pio_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (status == SCI_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) stp_req->pio_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /* We are transfering the whole frame so copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) status = sci_stp_request_pio_data_in_copy_data_buffer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (status == SCI_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /* All other completion status cause the IO to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * complete. If a NAK was received, then it is up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * the user to retry the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) pio_data_out_tx_done_tc_event(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) bool all_frames_transferred = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) struct isci_stp_request *stp_req = &ireq->stp.req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) /* Transmit data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (stp_req->pio_len != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) status = sci_stp_request_pio_data_out_transmit_data(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (status == SCI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (stp_req->pio_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) all_frames_transferred = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) } else if (stp_req->pio_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * this will happen if the all data is written at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * first time after the pio setup fis is received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) all_frames_transferred = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) /* all data transferred. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (all_frames_transferred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * Change the state to SCI_REQ_STP_PIO_DATA_IN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * and wait for PIO_SETUP fis / or D2H REg fis. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * All other completion status cause the IO to be complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * If a NAK was received, then it is up to the user to retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) u32 frame_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct isci_host *ihost = ireq->owning_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct dev_to_host_fis *frame_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) enum sci_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) u32 *frame_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) (void **)&frame_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if ((status == SCI_SUCCESS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) (frame_header->fis_type == FIS_REGD2H)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) (void **)&frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) sci_controller_copy_sata_response(&ireq->stp.rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) frame_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) u32 frame_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct isci_host *ihost = ireq->owning_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) enum sci_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) struct dev_to_host_fis *frame_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) u32 *frame_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) (void **)&frame_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (status != SCI_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (frame_header->fis_type != FIS_REGD2H) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) dev_err(&ireq->isci_host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) "%s ERROR: invalid fis type 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) __func__, frame_header->fis_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) (void **)&frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) sci_controller_copy_sata_response(&ireq->stp.rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) (u32 *)frame_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) /* Frame has been decoded return it to the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) u32 frame_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) enum sci_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) status = process_unsolicited_fis(ireq, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (status == SCI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (ireq->stp.rsp.status & ATA_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (status != SCI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) ireq->sci_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) /* the d2h ufi is the end of non-data commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (task->data_dir == DMA_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct scu_task_context *task_context = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * type. The TC for previous Packet fis was already there, we only need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * change the H2D fis content.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) task_context->type.stp.fis_type = FIS_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) task_context->transfer_length_bytes = dev->cdb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static void scu_atapi_construct_task_context(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) struct scu_task_context *task_context = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) int cdb_len = dev->cdb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) /* reference: SSTL 1.13.4.2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * task_type, sata_direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (task->data_dir == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) task_context->sata_direction = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) /* todo: for NO_DATA command, we need to send out raw frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) task_context->sata_direction = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) task_context->type.stp.fis_type = FIS_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) /* task phase is set to TX_CMD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) task_context->task_phase = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) /* retry counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) task_context->stp_retry_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) /* data transfer size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) task_context->transfer_length_bytes = task->total_xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /* setup sgl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) sci_request_build_sgl(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) sci_io_request_frame_handler(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) u32 frame_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) struct isci_host *ihost = ireq->owning_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) struct isci_stp_request *stp_req = &ireq->stp.req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) enum sci_base_request_states state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) enum sci_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) ssize_t word_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) state = ireq->sm.current_state_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) case SCI_REQ_STARTED: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) struct ssp_frame_hdr ssp_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) void *frame_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) sci_unsolicited_frame_control_get_header(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) &frame_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (ssp_hdr.frame_type == SSP_RESPONSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) struct ssp_response_iu *resp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) (void **)&resp_iu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) resp_iu = &ireq->ssp.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (resp_iu->datapres == 0x01 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) resp_iu->datapres == 0x02) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) /* not a response frame, why did it get forwarded? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) dev_err(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) "%s: SCIC IO Request 0x%p received unexpected "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) "frame %d type 0x%02x\n", __func__, ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) frame_index, ssp_hdr.frame_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * In any case we are done with this frame buffer return it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) case SCI_REQ_TASK_WAIT_TC_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) sci_io_request_copy_response(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) case SCI_REQ_SMP_WAIT_RESP: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) struct scatterlist *sg = &task->smp_task.smp_resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) void *frame_header, *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) u8 *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) sci_unsolicited_frame_control_get_header(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) &frame_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) kaddr = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) rsp = kaddr + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) sci_swab32_cpy(rsp, frame_header, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (rsp[0] == SMP_RESPONSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) void *smp_resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) &smp_resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) word_cnt = (sg->length/4)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (word_cnt > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) word_cnt = min_t(unsigned int, word_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * This was not a response frame why did it get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * forwarded?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) dev_err(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) "%s: SCIC SMP Request 0x%p received unexpected "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) "frame %d type 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) rsp[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) return sci_stp_request_udma_general_frame_handler(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) case SCI_REQ_STP_UDMA_WAIT_D2H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /* Use the general frame handler to copy the resposne data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (status != SCI_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct dev_to_host_fis *frame_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) u32 *frame_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) (void **)&frame_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (status != SCI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) dev_err(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) "%s: SCIC IO Request 0x%p could not get frame "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) "header for frame index %d, status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) stp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) switch (frame_header->fis_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) case FIS_REGD2H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) (void **)&frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) sci_controller_copy_sata_response(&ireq->stp.rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) frame_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /* The command has completed with error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) dev_warn(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) "%s: IO Request:0x%p Frame Id:%d protocol "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) "violation occurred\n", __func__, stp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) /* Frame has been decoded return it to the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) case SCI_REQ_STP_PIO_WAIT_FRAME: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) struct dev_to_host_fis *frame_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) u32 *frame_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) (void **)&frame_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (status != SCI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) dev_err(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) "%s: SCIC IO Request 0x%p could not get frame "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) "header for frame index %d, status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) __func__, stp_req, frame_index, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) switch (frame_header->fis_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) case FIS_PIO_SETUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /* Get from the frame buffer the PIO Setup Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) (void **)&frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) /* Get the data from the PIO Setup The SCU Hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * returns first word in the frame_header and the rest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * of the data is in the frame buffer so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * back up one dword
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) /* transfer_count: first 16bits in the 4th dword */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) stp_req->pio_len = frame_buffer[3] & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /* status: 4th byte in the 3rd dword */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) stp_req->status = (frame_buffer[2] >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) sci_controller_copy_sata_response(&ireq->stp.rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) frame_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) ireq->stp.rsp.status = stp_req->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /* The next state is dependent on whether the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * request was PIO Data-in or Data out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (task->data_dir == DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) } else if (task->data_dir == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) /* Transmit data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) status = sci_stp_request_pio_data_out_transmit_data(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (status != SCI_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) case FIS_SETDEVBITS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) case FIS_REGD2H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (frame_header->status & ATA_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * Now why is the drive sending a D2H Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * FIS when it is still busy? Do nothing since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * we are still in the right state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) "%s: SCIC PIO Request 0x%p received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) "D2H Register FIS with BSY status "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) "0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) stp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) frame_header->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) (void **)&frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) sci_controller_copy_sata_response(&ireq->stp.rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) frame_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) /* FIXME: what do we do here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) /* Frame is decoded return it to the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) case SCI_REQ_STP_PIO_DATA_IN: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) struct dev_to_host_fis *frame_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) struct sata_fis_data *frame_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) (void **)&frame_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (status != SCI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) dev_err(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) "%s: SCIC IO Request 0x%p could not get frame "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) "header for frame index %d, status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) stp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (frame_header->fis_type != FIS_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) dev_err(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) "%s: SCIC PIO Request 0x%p received frame %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) "with fis type 0x%02x when expecting a data "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) "fis.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) stp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) frame_header->fis_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) /* Frame is decoded return it to the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (stp_req->sgl.index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) ireq->saved_rx_frame_index = frame_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) stp_req->pio_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) (void **)&frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) status = sci_stp_request_pio_data_in_copy_data(stp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) (u8 *)frame_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /* Frame is decoded return it to the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) /* Check for the end of the transfer, are there more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * bytes remaining for this data transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (status != SCI_SUCCESS || stp_req->pio_len != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if ((stp_req->status & ATA_BUSY) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) ireq->target_device->working_request = ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) if (task->data_dir == DMA_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) scu_atapi_reconstruct_raw_frame_task_context(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) scu_atapi_construct_task_context(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) sci_controller_continue_io(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) case SCI_REQ_ATAPI_WAIT_D2H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) return atapi_d2h_reg_frame_handler(ireq, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) case SCI_REQ_ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * TODO: Is it even possible to get an unsolicited frame in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) * aborting state?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) dev_warn(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) "%s: SCIC IO Request given unexpected frame %x while "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) "in state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) frame_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) sci_controller_release_frame(ihost, frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) return SCI_FAILURE_INVALID_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /* We must check ther response buffer to see if the D2H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) * Register FIS was received before we got the TC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) * completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) sci_remote_device_suspend(ireq->target_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) SCI_SW_SUSPEND_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) /* If we have an error completion status for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * TC then we can expect a D2H register FIS from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * the device so we must change state to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * for it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /* TODO Check to see if any of these completion status need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * wait for the device to host register fis.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) * - this comes only for B0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) /* All other completion status cause the IO to be complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) enum sci_base_request_states next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) sci_change_state(&ireq->sm, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) /* All other completion status cause the IO to be complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) * If a NAK was received, then it is up to the user to retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) * the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) struct isci_remote_device *idev = ireq->target_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) struct dev_to_host_fis *d2h = &ireq->stp.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) u16 len = sci_req_tx_bytes(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) /* likely non-error data underrrun, workaround missing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) * d2h frame from the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (d2h->fis_type != FIS_REGD2H) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) d2h->fis_type = FIS_REGD2H;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) d2h->flags = (1 << 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) d2h->status = 0x50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) d2h->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) d2h->lbal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) d2h->byte_count_low = len & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) d2h->byte_count_high = len >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) d2h->device = 0xa0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) d2h->lbal_exp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) d2h->lbam_exp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) d2h->lbah_exp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) d2h->_r_a = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) d2h->sector_count = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) d2h->sector_count_exp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) d2h->_r_b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) d2h->_r_c = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) d2h->_r_d = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) status = ireq->sci_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) /* the hw will have suspended the rnc, so complete the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * request upon pending resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) /* In this case, there is no UF coming after.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * compelte the IO now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) ireq->scu_status = SCU_TASK_DONE_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (d2h->fis_type == FIS_REGD2H) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) /* UF received change the device state to ATAPI_ERROR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) status = ireq->sci_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /* If receiving any non-success TC status, no UF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * received yet, then an UF for the status fis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * is coming after (XXX: suspect this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * actually a protocol error or a bug like the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * DONE_UNEXP_FIS case)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) static int sci_request_smp_completion_status_is_tx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) unsigned int completion_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) switch (completion_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) static int sci_request_smp_completion_status_is_tx_rx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) unsigned int completion_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return 0; /* There are no Tx/Rx SMP suspend conditions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static int sci_request_ssp_completion_status_is_tx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) unsigned int completion_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) switch (completion_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) case SCU_TASK_DONE_TX_RAW_CMD_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) case SCU_TASK_DONE_LF_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) static int sci_request_ssp_completion_status_is_tx_rx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) unsigned int completion_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) return 0; /* There are no Tx/Rx SSP suspend conditions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) static int sci_request_stpsata_completion_status_is_tx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) unsigned int completion_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) switch (completion_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) case SCU_TASK_DONE_TX_RAW_CMD_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) case SCU_TASK_DONE_LL_R_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) case SCU_TASK_DONE_LL_PERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) case SCU_TASK_DONE_REG_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) case SCU_TASK_DONE_SDB_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) unsigned int completion_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) switch (completion_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) case SCU_TASK_DONE_LF_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) case SCU_TASK_DONE_LL_SY_TERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) case SCU_TASK_DONE_LL_LF_TERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) case SCU_TASK_DONE_BREAK_RCVD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) case SCU_TASK_DONE_INV_FIS_LEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) case SCU_TASK_DONE_UNEXP_FIS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) case SCU_TASK_DONE_UNEXP_SDBFIS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) case SCU_TASK_DONE_MAX_PLD_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) static void sci_request_handle_suspending_completions(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) int is_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) int is_tx_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) switch (ireq->protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) case SAS_PROTOCOL_SMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) is_tx = sci_request_smp_completion_status_is_tx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) case SAS_PROTOCOL_SSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) is_tx = sci_request_ssp_completion_status_is_tx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) case SAS_PROTOCOL_STP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) is_tx_rx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) sci_request_stpsata_completion_status_is_tx_rx_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) dev_warn(&ireq->isci_host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) "%s: request %p has no valid protocol\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) __func__, ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) if (is_tx || is_tx_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) BUG_ON(is_tx && is_tx_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) sci_remote_node_context_suspend(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) &ireq->target_device->rnc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) SCI_HW_SUSPEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) : SCU_EVENT_TL_RNC_SUSPEND_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) sci_io_request_tc_completion(struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) u32 completion_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) enum sci_base_request_states state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct isci_host *ihost = ireq->owning_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) state = ireq->sm.current_state_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) /* Decode those completions that signal upcoming suspension events. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) sci_request_handle_suspending_completions(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) case SCI_REQ_STARTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) return request_started_state_tc_event(ireq, completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) case SCI_REQ_TASK_WAIT_TC_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) return ssp_task_request_await_tc_event(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) case SCI_REQ_SMP_WAIT_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) return smp_request_await_response_tc_event(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) case SCI_REQ_SMP_WAIT_TC_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) return smp_request_await_tc_event(ireq, completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) return stp_request_udma_await_tc_event(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) case SCI_REQ_STP_NON_DATA_WAIT_H2D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) return stp_request_non_data_await_h2d_tc_event(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) case SCI_REQ_STP_PIO_WAIT_H2D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) return stp_request_pio_await_h2d_completion_tc_event(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) case SCI_REQ_STP_PIO_DATA_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) return pio_data_out_tx_done_tc_event(ireq, completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) case SCI_REQ_ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) return request_aborting_state_tc_event(ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) case SCI_REQ_ATAPI_WAIT_H2D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) return atapi_raw_completion(ireq, completion_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) SCI_REQ_ATAPI_WAIT_PIO_SETUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) case SCI_REQ_ATAPI_WAIT_TC_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) return atapi_raw_completion(ireq, completion_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) SCI_REQ_ATAPI_WAIT_D2H);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) case SCI_REQ_ATAPI_WAIT_D2H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) return atapi_data_tc_completion_handler(ireq, completion_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) __func__, completion_code, req_state_name(state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) return SCI_FAILURE_INVALID_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) * isci_request_process_response_iu() - This function sets the status and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) * response iu, in the task struct, from the request object for the upper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) * layer driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) * @sas_task: This parameter is the task struct from the upper layer driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) * @resp_iu: This parameter points to the response iu of the completed request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) * @dev: This parameter specifies the linux device struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) * none.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) static void isci_request_process_response_iu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) struct sas_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) struct ssp_response_iu *resp_iu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) dev_dbg(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) "%s: resp_iu = %p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) "resp_iu->response_data_len = %x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) "resp_iu->sense_data_len = %x\nresponse data: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) resp_iu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) resp_iu->status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) resp_iu->datapres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) resp_iu->response_data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) resp_iu->sense_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) task->task_status.stat = resp_iu->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) /* libsas updates the task status fields based on the response iu. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) sas_ssp_task_response(dev, task, resp_iu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) * isci_request_set_open_reject_status() - This function prepares the I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) * completion for OPEN_REJECT conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) * @request: This parameter is the completed isci_request object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) * @response_ptr: This parameter specifies the service response for the I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) * @status_ptr: This parameter specifies the exec status for the I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) * @open_rej_reason: This parameter specifies the encoded reason for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) * abandon-class reject.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) * none.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) static void isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) struct isci_request *request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) struct sas_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) enum service_response *response_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) enum exec_status *status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) enum sas_open_rej_reason open_rej_reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) /* Task in the target is done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) *response_ptr = SAS_TASK_UNDELIVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) *status_ptr = SAS_OPEN_REJECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) task->task_status.open_rej_reason = open_rej_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) * isci_request_handle_controller_specific_errors() - This function decodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) * controller-specific I/O completion error conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) * @request: This parameter is the completed isci_request object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) * @response_ptr: This parameter specifies the service response for the I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) * @status_ptr: This parameter specifies the exec status for the I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) * none.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) static void isci_request_handle_controller_specific_errors(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) struct isci_remote_device *idev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) struct isci_request *request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) struct sas_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) enum service_response *response_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) enum exec_status *status_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) unsigned int cstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) cstatus = request->scu_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) dev_dbg(&request->isci_host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) "- controller status = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) __func__, request, cstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /* Decode the controller-specific errors; most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) * important is to recognize those conditions in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * the target may still have a task outstanding that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) * must be aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) * Note that there are SCU completion codes being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) * named in the decode below for which SCIC has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) * done work to handle them in a way other than as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) * a controller-specific completion code; these are left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * in the decode below for completeness sake.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) switch (cstatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) case SCU_TASK_DONE_DMASETUP_DIRERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) case SCU_TASK_DONE_XFERCNT_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (task->task_proto == SAS_PROTOCOL_SMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) *response_ptr = SAS_TASK_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) /* See if the device has been/is being stopped. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) * that we ignore the quiesce state, since we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) * concerned about the actual device state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) if (!idev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) *status_ptr = SAS_DEVICE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) *status_ptr = SAS_ABORTED_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /* Task in the target is not done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) *response_ptr = SAS_TASK_UNDELIVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) if (!idev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) *status_ptr = SAS_DEVICE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) *status_ptr = SAM_STAT_TASK_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) case SCU_TASK_DONE_CRC_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) case SCU_TASK_DONE_NAK_CMD_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) case SCU_TASK_DONE_EXCESS_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) case SCU_TASK_DONE_UNEXP_FIS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) /* Also SCU_TASK_DONE_UNEXP_RESP: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) /* These are conditions in which the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * has completed the task, so that no cleanup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) * is necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) *response_ptr = SAS_TASK_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /* See if the device has been/is being stopped. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) * that we ignore the quiesce state, since we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) * concerned about the actual device state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (!idev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) *status_ptr = SAS_DEVICE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) *status_ptr = SAS_ABORTED_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) /* Note that the only open reject completion codes seen here will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) * abandon-class codes; all others are automatically retried in the SCU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) request, task, response_ptr, status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) SAS_OREJ_WRONG_DEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) /* Note - the return of AB0 will change when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) * libsas implements detection of zone violations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) request, task, response_ptr, status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) SAS_OREJ_RESV_AB0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) request, task, response_ptr, status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) SAS_OREJ_RESV_AB1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) request, task, response_ptr, status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) SAS_OREJ_RESV_AB2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) request, task, response_ptr, status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) SAS_OREJ_RESV_AB3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) request, task, response_ptr, status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) SAS_OREJ_BAD_DEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) request, task, response_ptr, status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) SAS_OREJ_STP_NORES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) request, task, response_ptr, status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) SAS_OREJ_EPROTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) isci_request_set_open_reject_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) request, task, response_ptr, status_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) SAS_OREJ_CONN_RATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) case SCU_TASK_DONE_LL_R_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) /* Also SCU_TASK_DONE_ACK_NAK_TO: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) case SCU_TASK_DONE_LL_PERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) case SCU_TASK_DONE_LL_SY_TERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) /* Also SCU_TASK_DONE_NAK_ERR:*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) case SCU_TASK_DONE_LL_LF_TERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) case SCU_TASK_DONE_LL_ABORT_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) case SCU_TASK_DONE_SEQ_INV_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) /* Also SCU_TASK_DONE_UNEXP_XR: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) case SCU_TASK_DONE_XR_IU_LEN_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) case SCU_TASK_DONE_INV_FIS_LEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) /* Also SCU_TASK_DONE_XR_WD_LEN: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) case SCU_TASK_DONE_SDMA_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) case SCU_TASK_DONE_OFFSET_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) case SCU_TASK_DONE_MAX_PLD_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) case SCU_TASK_DONE_LF_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) case SCU_TASK_DONE_SMP_LL_RX_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) case SCU_TASK_DONE_UNEXP_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) case SCU_TASK_DONE_UNEXP_SDBFIS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) case SCU_TASK_DONE_REG_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) case SCU_TASK_DONE_SDB_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) case SCU_TASK_DONE_TASK_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) /* Task in the target is not done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) *response_ptr = SAS_TASK_UNDELIVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) *status_ptr = SAM_STAT_TASK_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) if (task->task_proto == SAS_PROTOCOL_SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) struct task_status_struct *ts = &task->task_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) struct ata_task_resp *resp = (void *)&ts->buf[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) resp->frame_len = sizeof(*fis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) memcpy(resp->ending_fis, fis, sizeof(*fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) ts->buf_valid_size = sizeof(*resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) /* If an error is flagged let libata decode the fis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (ac_err_mask(fis->status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) ts->stat = SAS_PROTO_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) ts->stat = SAM_STAT_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) ts->resp = SAS_TASK_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) static void isci_request_io_request_complete(struct isci_host *ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) struct isci_request *request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) enum sci_io_status completion_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) struct sas_task *task = isci_request_access_task(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) struct ssp_response_iu *resp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) unsigned long task_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) struct isci_remote_device *idev = request->target_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) enum service_response response = SAS_TASK_UNDELIVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) enum exec_status status = SAS_ABORTED_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) "%s: request = %p, task = %p, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) "task->data_dir = %d completion_status = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) __func__, request, task, task->data_dir, completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) /* The request is done from an SCU HW perspective. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) /* This is an active request being completed from the core. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) switch (completion_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) case SCI_IO_FAILURE_RESPONSE_VALID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) __func__, request, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) if (sas_protocol_ata(task->task_proto)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) isci_process_stp_response(task, &request->stp.rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) } else if (SAS_PROTOCOL_SSP == task->task_proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) /* crack the iu response buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) resp_iu = &request->ssp.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) isci_request_process_response_iu(task, resp_iu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) &ihost->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) } else if (SAS_PROTOCOL_SMP == task->task_proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) dev_err(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) "SAS_PROTOCOL_SMP protocol\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) dev_err(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) "%s: unknown protocol\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) /* use the task status set in the task struct by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) * isci_request_process_response_iu call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) response = task->task_status.resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) status = task->task_status.stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) case SCI_IO_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) case SCI_IO_SUCCESS_IO_DONE_EARLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) response = SAS_TASK_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) status = SAM_STAT_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) /* This was an SSP / STP / SATA transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) * There is a possibility that less data than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) * the maximum was transferred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) u32 transferred_length = sci_req_tx_bytes(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) task->task_status.residual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) = task->total_xfer_len - transferred_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) /* If there were residual bytes, call this an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) * underrun.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) if (task->task_status.residual != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) status = SAS_DATA_UNDERRUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) case SCI_IO_FAILURE_TERMINATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) __func__, request, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) /* The request was terminated explicitly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) response = SAS_TASK_UNDELIVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) /* See if the device has been/is being stopped. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) * that we ignore the quiesce state, since we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) * concerned about the actual device state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (!idev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) status = SAS_DEVICE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) status = SAS_ABORTED_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) isci_request_handle_controller_specific_errors(idev, request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) task, &response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) /* This is a special case, in that the I/O completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) * is telling us that the device needs a reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) * In order for the device reset condition to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) * noticed, the I/O has to be handled in the error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) * handler. Set the reset flag and cause the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) * SCSI error thread to be scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) spin_lock_irqsave(&task->task_state_lock, task_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) spin_unlock_irqrestore(&task->task_state_lock, task_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) /* Fail the I/O. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) response = SAS_TASK_UNDELIVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) status = SAM_STAT_TASK_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) case SCI_FAILURE_RETRY_REQUIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) /* Fail the I/O so it can be retried. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) response = SAS_TASK_UNDELIVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (!idev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) status = SAS_DEVICE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) status = SAS_ABORTED_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) /* Catch any otherwise unhandled error codes here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) "%s: invalid completion code: 0x%x - "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) "isci_request = %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) __func__, completion_status, request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) response = SAS_TASK_UNDELIVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) /* See if the device has been/is being stopped. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) * that we ignore the quiesce state, since we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) * concerned about the actual device state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (!idev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) status = SAS_DEVICE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) status = SAS_ABORTED_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) if (SAS_PROTOCOL_SMP == task->task_proto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) switch (task->task_proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) case SAS_PROTOCOL_SSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) if (task->data_dir == DMA_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) if (task->num_scatter == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) /* 0 indicates a single dma address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) dma_unmap_single(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) request->zero_scatter_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) task->total_xfer_len, task->data_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) else /* unmap the sgl dma addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) dma_unmap_sg(&ihost->pdev->dev, task->scatter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) request->num_sg_entries, task->data_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) case SAS_PROTOCOL_SMP: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) struct scatterlist *sg = &task->smp_task.smp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) struct smp_req *smp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) /* need to swab it back in case the command buffer is re-used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) kaddr = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) smp_req = kaddr + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) spin_lock_irqsave(&task->task_state_lock, task_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) task->task_status.resp = response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) task->task_status.stat = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) /* Normal notification (task_done) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) task->task_state_flags |= SAS_TASK_STATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) SAS_TASK_STATE_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) spin_unlock_irqrestore(&task->task_state_lock, task_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) /* complete the io request to the core. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) sci_controller_complete_io(ihost, request->target_device, request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) /* set terminated handle so it cannot be completed or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) * terminated again, and to cause any calls into abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) * task to recognize the already completed case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) set_bit(IREQ_TERMINATED, &request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) ireq_done(ihost, request, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) struct domain_device *dev = ireq->target_device->domain_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) enum sci_base_request_states state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) struct sas_task *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) /* XXX as hch said always creating an internal sas_task for tmf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) * requests would simplify the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) /* all unaccelerated request types (non ssp or ncq) handled with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) * substates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) if (!task && dev->dev_type == SAS_END_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) state = SCI_REQ_TASK_WAIT_TC_COMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) state = SCI_REQ_SMP_WAIT_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) } else if (task && sas_protocol_ata(task->task_proto) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) !task->ata_task.use_ncq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) if (dev->sata_dev.class == ATA_DEV_ATAPI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) task->ata_task.fis.command == ATA_CMD_PACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) state = SCI_REQ_ATAPI_WAIT_H2D;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) } else if (task->data_dir == DMA_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) } else if (task->ata_task.dma_xfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) } else /* PIO */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) state = SCI_REQ_STP_PIO_WAIT_H2D;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) /* SSP or NCQ are fully accelerated, no substates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) sci_change_state(sm, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) struct isci_host *ihost = ireq->owning_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) /* Tell the SCI_USER that the IO request is complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) if (!test_bit(IREQ_TMF, &ireq->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) isci_request_io_request_complete(ihost, ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) ireq->sci_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) isci_task_request_complete(ihost, ireq, ireq->sci_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) /* Setting the abort bit in the Task Context is required by the silicon. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) ireq->tc->abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) ireq->target_device->working_request = ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) ireq->target_device->working_request = ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) static const struct sci_base_state sci_request_state_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) [SCI_REQ_INIT] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) [SCI_REQ_CONSTRUCTED] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) [SCI_REQ_STARTED] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) .enter_state = sci_request_started_state_enter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) [SCI_REQ_STP_PIO_WAIT_H2D] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) [SCI_REQ_STP_PIO_DATA_IN] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) [SCI_REQ_STP_PIO_DATA_OUT] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) [SCI_REQ_TASK_WAIT_TC_COMP] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) [SCI_REQ_TASK_WAIT_TC_RESP] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) [SCI_REQ_SMP_WAIT_RESP] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) [SCI_REQ_SMP_WAIT_TC_COMP] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) [SCI_REQ_ATAPI_WAIT_H2D] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) [SCI_REQ_ATAPI_WAIT_D2H] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) [SCI_REQ_COMPLETED] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) .enter_state = sci_request_completed_state_enter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) [SCI_REQ_ABORTING] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) .enter_state = sci_request_aborting_state_enter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) [SCI_REQ_FINAL] = { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) sci_general_request_construct(struct isci_host *ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) struct isci_remote_device *idev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) ireq->target_device = idev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) ireq->protocol = SAS_PROTOCOL_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) ireq->sci_status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) ireq->scu_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) ireq->post_context = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) sci_io_request_construct(struct isci_host *ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) struct isci_remote_device *idev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) struct domain_device *dev = idev->domain_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) /* Build the common part of the request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) sci_general_request_construct(ihost, idev, ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) return SCI_FAILURE_INVALID_REMOTE_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) if (dev->dev_type == SAS_END_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) /* pass */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) else if (dev_is_sata(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) else if (dev_is_expander(dev->dev_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) /* pass */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) enum sci_status sci_task_request_construct(struct isci_host *ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) struct isci_remote_device *idev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) u16 io_tag, struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) struct domain_device *dev = idev->domain_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) /* Build the common part of the request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) sci_general_request_construct(ihost, idev, ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) set_bit(IREQ_TMF, &ireq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) memset(ireq->tc, 0, sizeof(struct scu_task_context));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) /* Set the protocol indicator. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (dev_is_sata(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) ireq->protocol = SAS_PROTOCOL_STP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) ireq->protocol = SAS_PROTOCOL_SSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) static enum sci_status isci_request_ssp_request_construct(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) struct isci_request *request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) enum sci_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) dev_dbg(&request->isci_host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) "%s: request = %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) status = sci_io_request_construct_basic_ssp(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) struct host_to_dev_fis *fis = &ireq->stp.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) struct ata_queued_cmd *qc = task->uldd_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) enum sci_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) dev_dbg(&ireq->isci_host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) "%s: ireq = %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) if (!task->ata_task.device_control_reg_update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) fis->flags |= 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) fis->flags &= 0xF0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) status = sci_io_request_construct_basic_sata(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) qc->tf.command == ATA_CMD_FPDMA_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) qc->tf.command == ATA_CMD_FPDMA_RECV ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) qc->tf.command == ATA_CMD_FPDMA_SEND ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) qc->tf.command == ATA_CMD_NCQ_NON_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) fis->sector_count = qc->tag << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) ireq->tc->type.stp.ncq_tag = qc->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) static enum sci_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) sci_io_request_construct_smp(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) struct isci_request *ireq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) struct sas_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) struct scatterlist *sg = &task->smp_task.smp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) struct isci_remote_device *idev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) struct scu_task_context *task_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) struct isci_port *iport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) struct smp_req *smp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) u8 req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) kaddr = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) smp_req = kaddr + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) * Look at the SMP requests' header fields; for certain SAS 1.x SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) * functions under SAS 2.0, a zero request length really indicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) * a non-zero default length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) if (smp_req->req_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) switch (smp_req->func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) case SMP_DISCOVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) case SMP_REPORT_PHY_ERR_LOG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) case SMP_REPORT_PHY_SATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) case SMP_REPORT_ROUTE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) smp_req->req_len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) case SMP_CONF_ROUTE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) case SMP_PHY_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) case SMP_PHY_TEST_FUNCTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) smp_req->req_len = 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) /* Default - zero is a valid default for 2.0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) req_len = smp_req->req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) cmd = *(u32 *) smp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) return SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) ireq->protocol = SAS_PROTOCOL_SMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) /* byte swap the smp request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) task_context = ireq->tc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) idev = ireq->target_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) iport = idev->owning_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) * Fill in the TC with its required data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) * 00h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) task_context->priority = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) task_context->initiator_request = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) task_context->connection_rate = idev->connection_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) task_context->protocol_engine_index = ISCI_PEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) task_context->logical_port_index = iport->physical_port_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) task_context->abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) task_context->valid = SCU_TASK_CONTEXT_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) task_context->context_type = SCU_TASK_CONTEXT_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) /* 04h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) task_context->remote_node_index = idev->rnc.remote_node_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) task_context->command_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) /* 08h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) task_context->link_layer_control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) task_context->do_not_dma_ssp_good_response = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) task_context->strict_ordering = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) task_context->control_frame = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) task_context->timeout_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) task_context->block_guard_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) /* 0ch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) task_context->address_modifier = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) /* 10h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) task_context->ssp_command_iu_length = req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) /* 14h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) task_context->transfer_length_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) * 18h ~ 30h, protocol specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) * since commandIU has been build by framework at this point, we just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) * copy the frist DWord from command IU to this location. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) memcpy(&task_context->type.smp, &cmd, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) * 40h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) * "For SMP you could program it to zero. We would prefer that way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) * so that done code will be consistent." - Venki
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) task_context->task_phase = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) (iport->physical_port_index <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) ISCI_TAG_TCI(ireq->io_tag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) * Copy the physical address for the command buffer to the SCU Task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) * Context command buffer should not contain command header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) /* SMP response comes as UF, so no need to set response IU address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) task_context->response_iu_upper = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) task_context->response_iu_lower = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) * isci_smp_request_build() - This function builds the smp request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) * @ireq: This parameter points to the isci_request allocated in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) * request construct function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) * SCI_SUCCESS on successfull completion, or specific failure code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) static enum sci_status isci_smp_request_build(struct isci_request *ireq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) struct sas_task *task = isci_request_access_task(ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) struct device *dev = &ireq->isci_host->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) enum sci_status status = SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) status = sci_io_request_construct_smp(dev, ireq, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (status != SCI_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) dev_dbg(&ireq->isci_host->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) "%s: failed with status = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) * isci_io_request_build() - This function builds the io request object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) * @ihost: This parameter specifies the ISCI host object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) * @request: This parameter points to the isci_request object allocated in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) * request construct function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) * @sci_device: This parameter is the handle for the sci core's remote device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) * object that is the destination for this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) * SCI_SUCCESS on successfull completion, or specific failure code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) static enum sci_status isci_io_request_build(struct isci_host *ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) struct isci_request *request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) struct isci_remote_device *idev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) enum sci_status status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) struct sas_task *task = isci_request_access_task(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) "%s: idev = 0x%p; request = %p, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) "num_scatter = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) idev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) task->num_scatter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) /* map the sgl addresses, if present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) * libata does the mapping for sata devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) * before we get the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) if (task->num_scatter &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) !sas_protocol_ata(task->task_proto) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) !(SAS_PROTOCOL_SMP & task->task_proto)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) request->num_sg_entries = dma_map_sg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) &ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) task->scatter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) task->num_scatter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) task->data_dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) if (request->num_sg_entries == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) return SCI_FAILURE_INSUFFICIENT_RESOURCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) status = sci_io_request_construct(ihost, idev, request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) if (status != SCI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) "%s: failed request construct\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) return SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) switch (task->task_proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) case SAS_PROTOCOL_SMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) status = isci_smp_request_build(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) case SAS_PROTOCOL_SSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) status = isci_request_ssp_request_construct(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) case SAS_PROTOCOL_SATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) case SAS_PROTOCOL_STP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) status = isci_request_stp_request_construct(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) "%s: unknown protocol\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) return SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) return SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) struct isci_request *ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) ireq->io_tag = tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) ireq->io_request_completion = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) ireq->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) ireq->num_sg_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) return ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) struct sas_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) u16 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) struct isci_request *ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) ireq = isci_request_from_tag(ihost, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) ireq->ttype_ptr.io_task_ptr = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) clear_bit(IREQ_TMF, &ireq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) task->lldd_task = ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) return ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) struct isci_tmf *isci_tmf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) u16 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) struct isci_request *ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) ireq = isci_request_from_tag(ihost, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) set_bit(IREQ_TMF, &ireq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) return ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) struct sas_task *task, u16 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) enum sci_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) struct isci_request *ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) /* do common allocation and init of request object. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) ireq = isci_io_request_from_tag(ihost, task, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) status = isci_io_request_build(ihost, ireq, idev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) if (status != SCI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) "%s: request_construct failed - status = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) spin_lock_irqsave(&ihost->scic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) if (isci_task_is_ncq_recovery(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) /* The device is in an NCQ recovery state. Issue the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) * request on the task side. Note that it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) * complete on the I/O request side because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) * request was built that way (ie.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) * ireq->is_task_management_request is false).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) status = sci_controller_start_task(ihost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) idev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) status = SCI_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) /* send the request, let the core assign the IO TAG. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) status = sci_controller_start_io(ihost, idev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) ireq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (status != SCI_SUCCESS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) dev_dbg(&ihost->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) "%s: failed request start (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) spin_unlock_irqrestore(&ihost->scic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) /* Either I/O started OK, or the core has signaled that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) * the device needs a target reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) if (status != SCI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) /* The request did not really start in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) * hardware, so clear the request handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) * here so no terminations will be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) set_bit(IREQ_TERMINATED, &ireq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) spin_unlock_irqrestore(&ihost->scic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) if (status ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) /* Signal libsas that we need the SCSI error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) * handler thread to work on this I/O and that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) * we want a device reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) spin_lock_irqsave(&task->task_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) spin_unlock_irqrestore(&task->task_state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) /* Cause this task to be scheduled in the SCSI error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) * handler thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) sas_task_abort(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) /* Change the status, since we are holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) * the I/O until it is managed by the SCSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) * error handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) status = SCI_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) }