/*
 * Adaptec U320 device driver firmware for Linux and FreeBSD.
 *
 * Copyright (c) 1994-2001, 2004 Justin T. Gibbs.
 * Copyright (c) 2000-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $FreeBSD$
 */

VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $"
PATCH_ARG_LIST = "struct ahd_softc *ahd"
PREFIX = "ahd_"

#include "aic79xx.reg"
#include "scsi_message.h"

restart:
if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
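	/*
	 * If a sequencer interrupt code is still latched from before
	 * this restart, clear it before dropping into the idle loop.
	 */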
	test SEQINTCODE, 0xFF jz idle_loop;
	SET_SEQINTCODE(NO_SEQINT)
}

idle_loop:

if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
	/*
	 * Convert ERROR status into a sequencer
	 * interrupt to handle the case of an
	 * interrupt collision on the hardware
	 * setting of HWERR.
	 */
	test ERROR, 0xFF jz no_error_set;
	SET_SEQINTCODE(SAW_HWERR)
no_error_set:
}
	SET_MODE(M_SCSI, M_SCSI)
	test SCSISEQ0, ENSELO|ENARBO jnz idle_loop_checkbus;
	test SEQ_FLAGS2, SELECTOUT_QFROZEN jz check_waiting_list;
	/*
	 * If the kernel has caught up with us, thaw the queue.
	 */
	mov A, KERNEL_QFREEZE_COUNT;
	cmp QFREEZE_COUNT, A jne check_frozen_completions;
	mov A, KERNEL_QFREEZE_COUNT[1];
	cmp QFREEZE_COUNT[1], A jne check_frozen_completions;
	and SEQ_FLAGS2, ~SELECTOUT_QFROZEN;
	jmp check_waiting_list;
check_frozen_completions:
	test SSTAT0, SELDO|SELINGO jnz idle_loop_checkbus;
BEGIN_CRITICAL;
	/*
	 * If we have completions stalled waiting for the qfreeze
	 * to take effect, move them over to the complete_scb list
	 * now that no selections are pending.
	 */
	cmp COMPLETE_ON_QFREEZE_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus;
	/*
	 * Find the end of the qfreeze list. The first element has
	 * to be treated specially.
	 */
	bmov SCBPTR, COMPLETE_ON_QFREEZE_HEAD, 2;
	cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je join_lists;
	/*
	 * Now the normal loop.
	 */
	bmov SCBPTR, SCB_NEXT_COMPLETE, 2;
	cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . - 1;
join_lists:
	bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
	bmov COMPLETE_SCB_HEAD, COMPLETE_ON_QFREEZE_HEAD, 2;
	mvi COMPLETE_ON_QFREEZE_HEAD[1], SCB_LIST_NULL;
	jmp idle_loop_checkbus;
check_waiting_list:
	cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus;
	/*
	 * ENSELO is cleared by a SELDO, so we must test for SELDO
	 * one last time.
	 */
	test SSTAT0, SELDO jnz select_out;
	call start_selection;
idle_loop_checkbus:
	test SSTAT0, SELDO jnz select_out;
END_CRITICAL;
	test SSTAT0, SELDI jnz select_in;
	test SCSIPHASE, ~DATA_PHASE_MASK jz idle_loop_check_nonpackreq;
	test SCSISIGO, ATNO jz idle_loop_check_nonpackreq;
	call unexpected_nonpkt_phase_find_ctxt;
idle_loop_check_nonpackreq:
	test SSTAT2, NONPACKREQ jz . + 2;
	call unexpected_nonpkt_phase_find_ctxt;
if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
	/*
	 * On Rev A. hardware, the busy LED is only
	 * turned on automatically during selections
	 * and re-selections. Make the LED status
	 * more useful by forcing it to be on so
	 * long as one of our data FIFOs is active.
	 */
	and A, FIFO0FREE|FIFO1FREE, DFFSTAT;
	cmp A, FIFO0FREE|FIFO1FREE jne . + 3;
	and SBLKCTL, ~DIAGLEDEN|DIAGLEDON;
	jmp . + 2;
	or SBLKCTL, DIAGLEDEN|DIAGLEDON;
}
	call idle_loop_gsfifo_in_scsi_mode;
	call idle_loop_service_fifos;
	call idle_loop_cchan;
	jmp idle_loop;

idle_loop_gsfifo:
	SET_MODE(M_SCSI, M_SCSI)
BEGIN_CRITICAL;
idle_loop_gsfifo_in_scsi_mode:
	test LQISTAT2, LQIGSAVAIL jz return;
	/*
	 * We have received good status for this transaction. There may
	 * still be data in our FIFOs draining to the host. Complete
	 * the SCB only if all data has transferred to the host.
	 */
good_status_IU_done:
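	/*
	 * The GSFIFO entry holds the tag of the SCB that just
	 * completed with good status; make that SCB current and
	 * record the good (zero) SCSI status.
	 */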
	bmov SCBPTR, GSFIFO, 2;
	clr SCB_SCSI_STATUS;
	/*
	 * If a command completed before an attempted task management
	 * function completed, notify the host after disabling any
	 * pending select-outs.
	 */
	test SCB_TASK_MANAGEMENT, 0xFF jz gsfifo_complete_normally;
	test SSTAT0, SELDO|SELINGO jnz . + 2;
	and SCSISEQ0, ~ENSELO;
	SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY)
gsfifo_complete_normally:
	or SCB_CONTROL, STATUS_RCVD;

	/*
	 * Since this status did not consume a FIFO, we have to
	 * be a bit more diligent in how we check for FIFOs pertaining
	 * to this transaction. There are two states that a FIFO still
	 * transferring data may be in.
	 *
	 * 1) Configured and draining to the host, with a FIFO handler.
	 * 2) Pending cfg4data, fifo not empty.
	 *
	 * Case 1 can be detected by noticing a non-zero FIFO active
	 * count in the SCB. In this case, we allow the routine servicing
	 * the FIFO to complete the SCB.
	 *
	 * Case 2 implies either a pending or yet-to-occur save data
	 * pointers operation for this same context in the other FIFO. So, if
	 * we detect case 1, we will properly defer the post of the SCB
	 * and achieve the desired result. The pending cfg4data will
	 * notice that status has been received and complete the SCB.
	 */
	test SCB_FIFO_USE_COUNT, 0xFF jnz idle_loop_gsfifo_in_scsi_mode;
	call complete;
END_CRITICAL;
	jmp idle_loop_gsfifo_in_scsi_mode;

idle_loop_service_fifos:
	SET_MODE(M_DFF0, M_DFF0)
BEGIN_CRITICAL;
	test LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_next_fifo;
	call longjmp;
END_CRITICAL;
idle_loop_next_fifo:
	SET_MODE(M_DFF1, M_DFF1)
BEGIN_CRITICAL;
	test LONGJMP_ADDR[1], INVALID_ADDR jz longjmp;
END_CRITICAL;
return:
	ret;

idle_loop_cchan:
	SET_MODE(M_CCHAN, M_CCHAN)
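	/*
	 * If the host has posted an update to HS_MAILBOX, acknowledge
	 * it and latch a local copy for the coalescing checks below.
	 */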
	test QOFF_CTLSTA, HS_MAILBOX_ACT jz hs_mailbox_empty;
	or QOFF_CTLSTA, HS_MAILBOX_ACT;
	mov LOCAL_HS_MAILBOX, HS_MAILBOX;
hs_mailbox_empty:
BEGIN_CRITICAL;
	test CCSCBCTL, CCARREN|CCSCBEN jz scbdma_idle;
	test CCSCBCTL, CCSCBDIR jnz fetch_new_scb_inprog;
	test CCSCBCTL, CCSCBDONE jz return;
	/* FALLTHROUGH */
scbdma_tohost_done:
	test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone;
	/*
	 * An SCB has been successfully uploaded to the host.
	 * If the SCB was uploaded for some reason other than
	 * bad SCSI status (currently only for underruns), we
	 * queue the SCB for normal completion. Otherwise, we
	 * wait until any select-out activity has halted, and
	 * then queue the completion.
	 */
	and CCSCBCTL, ~(CCARREN|CCSCBEN);
	bmov COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
	cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . + 2;
	mvi COMPLETE_DMA_SCB_TAIL[1], SCB_LIST_NULL;
	test SCB_SCSI_STATUS, 0xff jz scbdma_queue_completion;
	bmov SCB_NEXT_COMPLETE, COMPLETE_ON_QFREEZE_HEAD, 2;
	bmov COMPLETE_ON_QFREEZE_HEAD, SCBPTR, 2 ret;
scbdma_queue_completion:
	bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
	bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
fill_qoutfifo_dmadone:
	and CCSCBCTL, ~(CCARREN|CCSCBEN);
	call qoutfifo_updated;
	mvi COMPLETE_SCB_DMAINPROG_HEAD[1], SCB_LIST_NULL;
	bmov QOUTFIFO_NEXT_ADDR, SCBHADDR, 4;
	test QOFF_CTLSTA, SDSCB_ROLLOVR jz return;
	bmov QOUTFIFO_NEXT_ADDR, SHARED_DATA_ADDR, 4;
	xor QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID_TOGGLE ret;
END_CRITICAL;

qoutfifo_updated:
	/*
	 * If there are more commands waiting to be dma'ed
	 * to the host, always coalesce. Otherwise honor the
	 * host's wishes.
	 */
	cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
	cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
	test LOCAL_HS_MAILBOX, ENINT_COALESCE jz issue_cmdcmplt;

	/*
	 * If we have relatively few commands outstanding, don't
	 * bother waiting for another command to complete.
	 */
	test CMDS_PENDING[1], 0xFF jnz coalesce_by_count;
	/* Add -1 so that jnc means <= not just < */
	add A, -1, INT_COALESCING_MINCMDS;
	add NONE, A, CMDS_PENDING;
	jnc issue_cmdcmplt;

	/*
	 * If coalescing, only coalesce up to the limit
	 * provided by the host driver.
	 */
coalesce_by_count:
	mov A, INT_COALESCING_MAXCMDS;
	add NONE, A, INT_COALESCING_CMDCOUNT;
	jc issue_cmdcmplt;
	/*
	 * If the timer is not currently active,
	 * fire it up.
	 */
	test INTCTL, SWTMINTMASK jz return;
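	/*
	 * Load the coalescing delay, clear any stale timer status,
	 * and start the timer with its interrupt unmasked.
	 */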
	bmov SWTIMER, INT_COALESCING_TIMER, 2;
	mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO;
	or INTCTL, SWTMINTEN|SWTIMER_START;
	and INTCTL, ~SWTMINTMASK ret;

issue_cmdcmplt:
	mvi INTSTAT, CMDCMPLT;
	clr INT_COALESCING_CMDCOUNT;
	or INTCTL, SWTMINTMASK ret;

BEGIN_CRITICAL;
fetch_new_scb_inprog:
	test CCSCBCTL, ARRDONE jz return;
fetch_new_scb_done:
	and CCSCBCTL, ~(CCARREN|CCSCBEN);
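	/* Account for the new command in our 16-bit CMDS_PENDING count. */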
	clr A;
	add CMDS_PENDING, 1;
	adc CMDS_PENDING[1], A;
if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
	/*
	 * "Short Luns" are not placed into outgoing LQ
	 * packets in the correct byte order. Use a full-
	 * sized LUN field instead and fill it with the
	 * one byte of LUN information we support.
	 */
	mov SCB_PKT_LUN[6], SCB_LUN;
}
	/*
	 * The FIFO use count field is shared with the
	 * tag set by the host so that our SCB dma engine
	 * knows the correct location to store the SCB.
	 * Set it to zero before processing the SCB.
	 */
	clr SCB_FIFO_USE_COUNT;
	/* Update the next SCB address to download. */
	bmov NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4;
	/*
	 * NULL out the SCB links since these fields
	 * occupy the same location as SCB_NEXT_SCB_BUSADDR.
	 */
	mvi SCB_NEXT[1], SCB_LIST_NULL;
	mvi SCB_NEXT2[1], SCB_LIST_NULL;
	/* Increment our position in the QINFIFO. */
	mov NONE, SNSCB_QOFF;

	/*
	 * Save SCBID of this SCB in REG0 since
	 * SCBPTR will be clobbered during target
	 * list updates. We also record the SCB's
	 * flags so that we can refer to them even
	 * after SCBPTR has been changed.
	 */
	bmov REG0, SCBPTR, 2;
	mov A, SCB_CONTROL;

	/*
	 * Find the tail SCB of the execution queue
	 * for this target.
	 */
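	/*
	 * The target ID lives in the high nibble of SCB_SCSIID, so
	 * shifting right by 3 and clearing the stray low bit yields
	 * twice the target ID, the byte offset of this target's
	 * two-byte entry in the WAITING_SCB_TAILS array.
	 */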
	shr SINDEX, 3, SCB_SCSIID;
	and SINDEX, ~0x1;
	mvi SINDEX[1], (WAITING_SCB_TAILS >> 8);
	bmov DINDEX, SINDEX, 2;
	bmov SCBPTR, SINDIR, 2;

	/*
	 * Update the tail to point to the new SCB.
	 */
	bmov DINDIR, REG0, 2;

	/*
	 * If the queue was empty, queue this SCB as
	 * the first for this target.
	 */
	cmp SCBPTR[1], SCB_LIST_NULL je first_new_target_scb;

	/*
	 * SCBs that want to send messages must always be
	 * at the head of their per-target queue so that
	 * ATN can be asserted even if the current
	 * negotiation agreement is packetized. If the
	 * target queue is empty, the SCB can be queued
	 * immediately. If the queue is not empty, we must
	 * wait for it to empty before entering this SCB
	 * into the waiting for selection queue. Otherwise
	 * our batching and round-robin selection scheme
	 * could allow commands to be queued out of order.
	 * To simplify the implementation, we stop pulling
	 * new commands from the host until the MK_MESSAGE
	 * SCB can be queued to the waiting for selection
	 * list.
	 */
	test A, MK_MESSAGE jz batch_scb;

	/*
	 * If the last SCB is also a MK_MESSAGE SCB, then
	 * order is preserved even if we batch.
	 */
	test SCB_CONTROL, MK_MESSAGE jz batch_scb;

	/*
	 * Defer this SCB and stop fetching new SCBs until
	 * it can be queued. Since the SCB_SCSIID of the
	 * tail SCB must be the same as that of the newly
	 * queued SCB, there is no need to restore the SCBID
	 * here.
	 */
	or SEQ_FLAGS2, PENDING_MK_MESSAGE;
	bmov MK_MESSAGE_SCB, REG0, 2;
	mov MK_MESSAGE_SCSIID, SCB_SCSIID ret;

batch_scb:
	/*
	 * Otherwise just update the previous tail SCB to
	 * point to the new tail.
	 */
	bmov SCB_NEXT, REG0, 2 ret;

first_new_target_scb:
	/*
	 * Append SCB to the tail of the waiting for
	 * selection list.
	 */
	cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb;
	bmov SCBPTR, WAITING_TID_TAIL, 2;
	bmov SCB_NEXT2, REG0, 2;
	bmov WAITING_TID_TAIL, REG0, 2 ret;
first_new_scb:
	/*
	 * Whole list is empty, so the head of
	 * the list must be initialized too.
	 */
	bmov WAITING_TID_HEAD, REG0, 2;
	bmov WAITING_TID_TAIL, REG0, 2 ret;
END_CRITICAL;

scbdma_idle:
	/*
	 * Don't bother downloading new SCBs to execute
	 * if select-outs are currently frozen or we have
	 * a MK_MESSAGE SCB waiting to enter the queue.
	 */
	test SEQ_FLAGS2, SELECTOUT_QFROZEN|PENDING_MK_MESSAGE
			jnz scbdma_no_new_scbs;
BEGIN_CRITICAL;
	test QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb;
scbdma_no_new_scbs:
	cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb;
	cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return;
	/* FALLTHROUGH */
fill_qoutfifo:
	/*
	 * Keep track of the SCBs we are dmaing just
	 * in case the DMA fails or is aborted.
	 */
	bmov COMPLETE_SCB_DMAINPROG_HEAD, COMPLETE_SCB_HEAD, 2;
	mvi CCSCBCTL, CCSCBRESET;
	bmov SCBHADDR, QOUTFIFO_NEXT_ADDR, 4;
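	/*
	 * Cache the low byte of the qoutfifo address in A; it is
	 * added to CCSCBADDR below to detect cacheline crossings.
	 */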
	mov A, QOUTFIFO_NEXT_ADDR;
	bmov SCBPTR, COMPLETE_SCB_HEAD, 2;
fill_qoutfifo_loop:
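	/*
	 * Each completion entry consists of the SCB tag, the first
	 * byte of SCB_SGPTR, and the current entry-valid tag that
	 * lets the host distinguish new entries from stale ones.
	 */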
	bmov CCSCBRAM, SCBPTR, 2;
	mov CCSCBRAM, SCB_SGPTR[0];
	mov CCSCBRAM, QOUTFIFO_ENTRY_VALID_TAG;
	mov NONE, SDSCB_QOFF;
	inc INT_COALESCING_CMDCOUNT;
	add CMDS_PENDING, -1;
	adc CMDS_PENDING[1], -1;
	cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je fill_qoutfifo_done;
	cmp CCSCBADDR, CCSCBADDR_MAX je fill_qoutfifo_done;
	test QOFF_CTLSTA, SDSCB_ROLLOVR jnz fill_qoutfifo_done;
	/*
	 * Don't cross an ADB or Cacheline boundary when DMA'ing
	 * completion entries. In PCI mode, at least in 32/33
	 * configurations, the SCB DMA engine may lose its place
	 * in the data-stream should the target force a retry on
	 * something other than an 8-byte aligned boundary. In
	 * PCI-X mode, we do this to avoid split transactions since
	 * many chipsets seem to be unable to format proper split
	 * completions to continue the data transfer.
	 */
	add SINDEX, A, CCSCBADDR;
	test SINDEX, CACHELINE_MASK jz fill_qoutfifo_done;
	bmov SCBPTR, SCB_NEXT_COMPLETE, 2;
	jmp fill_qoutfifo_loop;
fill_qoutfifo_done:
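	/*
	 * Program the host transfer count with the number of bytes
	 * staged in CCSCBRAM and kick off the DMA of the staged
	 * completion entries to the host.
	 */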
	mov SCBHCNT, CCSCBADDR;
	mvi CCSCBCTL, CCSCBEN|CCSCBRESET;
	bmov COMPLETE_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
	mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL ret;

fetch_new_scb:
	bmov SCBHADDR, NEXT_QUEUED_SCB_ADDR, 4;
	mvi CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET jmp dma_scb;
dma_complete_scb:
	bmov SCBPTR, COMPLETE_DMA_SCB_HEAD, 2;
	bmov SCBHADDR, SCB_BUSADDR, 4;
	mvi CCARREN|CCSCBEN|CCSCBRESET jmp dma_scb;

/*
 * Either post or fetch an SCB from host memory. The caller
 * is responsible for polling for transfer completion.
 *
 * Prerequisites: Mode == M_CCHAN
 *                SINDEX contains CCSCBCTL flags
 *                SCBHADDR set to Host SCB address
 *                SCBPTR set to SCB src location on "push" operations
 */
SET_SRC_MODE M_CCHAN;
SET_DST_MODE M_CCHAN;
dma_scb:
	mvi SCBHCNT, SCB_TRANSFER_SIZE;
	mov CCSCBCTL, SINDEX ret;

setjmp:
	/*
	 * At least on Rev A hardware, a return in the same
	 * instruction as the bmov results in a return
	 * to the caller, not to the new address at the
	 * top of the stack. Since we want the latter
	 * (we use setjmp to register a handler from an
	 * interrupt context but not invoke that handler
	 * until we return to our idle loop), use a
	 * separate ret instruction.
	 */
	bmov LONGJMP_ADDR, STACK, 2;
	ret;
setjmp_inline:
	bmov LONGJMP_ADDR, STACK, 2;
longjmp:
	bmov STACK, LONGJMP_ADDR, 2 ret;
END_CRITICAL;

/*************************** Chip Bug Work Arounds ****************************/
/*
 * Must disable interrupts when setting the mode pointer
 * register as an interrupt occurring mid update will
 * fail to store the new mode value for restoration on
 * an iret.
 */
if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {
set_mode_work_around:
	mvi SEQINTCTL, INTVEC1DSL;
	mov MODE_PTR, SINDEX;
	clr SEQINTCTL ret;
}


if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
set_seqint_work_around:
	mov SEQINTCODE, SINDEX;
	mvi SEQINTCODE, NO_SEQINT ret;
}

/************************ Packetized LongJmp Routines *************************/
SET_SRC_MODE M_SCSI;
SET_DST_MODE M_SCSI;
start_selection:
BEGIN_CRITICAL;
if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
	/*
	 * Razor #494
	 * Rev A hardware fails to update LAST/CURR/NEXTSCB
	 * correctly after a packetized selection in several
	 * situations:
	 *
	 * 1) If only one command existed in the queue, the
	 *    LAST/CURR/NEXTSCB are unchanged.
	 *
	 * 2) In a non-QAS, protocol-allowed phase change,
	 *    the queue is shifted 1 too far. LASTSCB is
	 *    the last SCB that was correctly processed.
	 *
	 * 3) In the QAS case, if the full list of commands
	 *    was successfully sent, NEXTSCB is NULL and neither
	 *    CURRSCB nor LASTSCB can be trusted. We must
	 *    manually walk the list counting MAXCMDCNT elements
	 *    to find the last SCB that was sent correctly.
	 *
	 * To simplify the workaround for this bug in SELDO
	 * handling, we initialize LASTSCB prior to enabling
	 * selection so we can rely on it even for case #1 above.
	 */
	bmov LASTSCB, WAITING_TID_HEAD, 2;
}
	bmov CURRSCB, WAITING_TID_HEAD, 2;
	bmov SCBPTR, WAITING_TID_HEAD, 2;
	shr SELOID, 4, SCB_SCSIID;
	/*
	 * If we want to send a message to the device, ensure
	 * we are selecting with atn regardless of our packetized
	 * agreement. Since SPI4 only allows target reset or PPR
	 * messages if this is a packetized connection, the change
	 * to our negotiation table entry for this selection will
	 * be cleared when the message is acted on.
	 */
	test SCB_CONTROL, MK_MESSAGE jz . + 3;
	mov NEGOADDR, SELOID;
	or NEGCONOPTS, ENAUTOATNO;
	or SCSISEQ0, ENSELO ret;
END_CRITICAL;

/*
 * Allocate a FIFO for a non-packetized transaction.
 * On Rev A hardware, both FIFOs must be free before we
 * can allocate a FIFO for a non-packetized transaction.
 */
allocate_fifo_loop:
	/*
	 * Do whatever work is required to free a FIFO.
	 */
	call idle_loop_service_fifos;
	SET_MODE(M_SCSI, M_SCSI)
allocate_fifo:
if ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0) {
	and A, FIFO0FREE|FIFO1FREE, DFFSTAT;
	cmp A, FIFO0FREE|FIFO1FREE jne allocate_fifo_loop;
} else {
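	/*
	 * Prefer FIFO1 if it is free; otherwise fall back to FIFO0,
	 * or keep servicing the FIFOs until one of them frees up.
	 */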
	test DFFSTAT, FIFO1FREE jnz allocate_fifo1;
	test DFFSTAT, FIFO0FREE jz allocate_fifo_loop;
	mvi DFFSTAT, B_CURRFIFO_0;
	SET_MODE(M_DFF0, M_DFF0)
	bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;
}
SET_SRC_MODE M_SCSI;
SET_DST_MODE M_SCSI;
allocate_fifo1:
	mvi DFFSTAT, CURRFIFO_1;
	SET_MODE(M_DFF1, M_DFF1)
	bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;

/*
 * We have been reselected as an initiator
 * or selected as a target.
 */
SET_SRC_MODE M_SCSI;
SET_DST_MODE M_SCSI;
select_in:
if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
	/*
	 * On Rev A. hardware, the busy LED is only
	 * turned on automatically during selections
	 * and re-selections. Make the LED status
	 * more useful by forcing it to be on from
	 * the point of selection until our idle
	 * loop determines that neither of our FIFOs
	 * are busy. This handles the non-packetized
	 * case nicely as we will not return to the
	 * idle loop until the busfree at the end of
	 * each transaction.
	 */
	or SBLKCTL, DIAGLEDEN|DIAGLEDON;
}
if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
	/*
	 * Test to ensure that the bus has not
	 * already gone free prior to clearing
	 * any stale busfree status. This avoids
	 * a window whereby a busfree just after
	 * a selection could be missed.
	 */
	test SCSISIGI, BSYI jz . + 2;
	mvi CLRSINT1, CLRBUSFREE;
	or SIMODE1, ENBUSFREE;
}
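	/*
	 * Enable SCSI PIO, record the ID of the device that selected
	 * us along with our own ID in SAVED_SCSIID, clear the
	 * selection-in status, and enter the information transfer loop.
	 */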
	or SXFRCTL0, SPIOEN;
	and SAVED_SCSIID, SELID_MASK, SELID;
	and A, OID, IOWNID;
	or SAVED_SCSIID, A;
	mvi CLRSINT0, CLRSELDI;
	jmp ITloop;

/*
 * We have successfully selected out.
 *
 * Clear SELDO.
 * Dequeue all SCBs sent from the waiting queue
 * Requeue all SCBs *not* sent to the tail of the waiting queue
 * Take Razor #494 into account for above.
 *
 * In Packetized Mode:
 *	Return to the idle loop. Our interrupt handler will take
 *	care of any incoming L_Qs.
 *
 * In Non-Packetized Mode:
 *	Continue to our normal state machine.
 */
SET_SRC_MODE M_SCSI;
SET_DST_MODE M_SCSI;
select_out:
BEGIN_CRITICAL;
if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
	/*
	 * On Rev A. hardware, the busy LED is only
	 * turned on automatically during selections
	 * and re-selections. Make the LED status
	 * more useful by forcing it to be on from
	 * the point of re-selection until our idle
	 * loop determines that neither of our FIFOs
	 * are busy. This handles the non-packetized
	 * case nicely as we will not return to the
	 * idle loop until the busfree at the end of
	 * each transaction.
	 */
	or SBLKCTL, DIAGLEDEN|DIAGLEDON;
}
	/* Clear out all SCBs that have been successfully sent. */
if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
	/*
	 * For packetized, the LQO manager clears ENSELO on
	 * the assertion of SELDO. If we are non-packetized,
	 * LASTSCB and CURRSCB are accurate.
	 */
	test SCSISEQ0, ENSELO jnz use_lastscb;

	/*
	 * The update is correct for LQOSTAT1 errors. All
	 * but LQOBUSFREE are handled by kernel interrupts.
	 * If we see LQOBUSFREE, return to the idle loop.
	 * Once we are out of the select_out critical section,
	 * the kernel will cleanup the LQOBUSFREE and we will
	 * eventually restart the selection if appropriate.
	 */
	test LQOSTAT1, LQOBUSFREE jnz idle_loop;

	/*
	 * On a phase change outside of packet boundaries,
	 * LASTSCB points to the currently active SCB context
	 * on the bus.
	 */
	test LQOSTAT2, LQOPHACHGOUTPKT jnz use_lastscb;

	/*
	 * If the hardware has traversed the whole list, NEXTSCB
	 * will be NULL, CURRSCB and LASTSCB cannot be trusted,
	 * but MAXCMDCNT is accurate. If we stop part way through
	 * the list or only had one command to issue, NEXTSCB[1] is
	 * not NULL and LASTSCB is the last command to go out.
	 */
	cmp NEXTSCB[1], SCB_LIST_NULL jne use_lastscb;

	/*
	 * Brute force walk.
	 */
	bmov SCBPTR, WAITING_TID_HEAD, 2;
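	/*
	 * MAXCMDCNT lives in the CFG register space, so briefly
	 * flip the mode pointer by hand, with interrupt vector 1
	 * disabled so the mode change cannot be disturbed mid-update
	 * (see the mode pointer comment above).
	 */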
	mvi SEQINTCTL, INTVEC1DSL;
	mvi MODE_PTR, MK_MODE(M_CFG, M_CFG);
	mov A, MAXCMDCNT;
	mvi MODE_PTR, MK_MODE(M_SCSI, M_SCSI);
	clr SEQINTCTL;
find_lastscb_loop:
	dec A;
	test A, 0xFF jz found_last_sent_scb;
	bmov SCBPTR, SCB_NEXT, 2;
	jmp find_lastscb_loop;
use_lastscb:
	bmov SCBPTR, LASTSCB, 2;
found_last_sent_scb:
	bmov CURRSCB, SCBPTR, 2;
curscb_ww_done:
} else {
	bmov SCBPTR, CURRSCB, 2;
}

	/*
	 * If the whole list made it, clear our tail pointer to indicate
	 * that the per-target selection queue is now empty.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) cmp SCB_NEXT[1], SCB_LIST_NULL je select_out_clear_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * Requeue any SCBs not sent, to the tail of the waiting Q.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * We know that neither the per-TID list nor the list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * TIDs is empty. Use this knowledge to our advantage and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * queue the remainder to the tail of the global execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) bmov REG0, SCB_NEXT, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) select_out_queue_remainder:
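/*
 * REG0 holds the head of the chain being appended.  Link it in
 * behind the current tail TID via SCB_NEXT2 and make it the new
 * global tail.
 */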
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) bmov SCBPTR, WAITING_TID_TAIL, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) bmov SCB_NEXT2, REG0, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) bmov WAITING_TID_TAIL, REG0, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) jmp select_out_inc_tid_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) select_out_clear_tail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * Queue any pending MK_MESSAGE SCB for this target now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * that the queue is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) test SEQ_FLAGS2, PENDING_MK_MESSAGE jz select_out_no_mk_message_scb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) mov A, MK_MESSAGE_SCSIID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) cmp SCB_SCSIID, A jne select_out_no_mk_message_scb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) and SEQ_FLAGS2, ~PENDING_MK_MESSAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) bmov REG0, MK_MESSAGE_SCB, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) jmp select_out_queue_remainder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) select_out_no_mk_message_scb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * Clear this target's execution tail and increment the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) */
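/*
 * WAITING_SCB_TAILS holds a two byte tail pointer per target ID.
 * Writing SCB_LIST_NULL into the high byte is enough to mark the
 * entry empty, since the list tests elsewhere only check byte 1.
 */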
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) shr DINDEX, 3, SCB_SCSIID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) or DINDEX, 1; /* Want only the second byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) mvi DINDEX[1], ((WAITING_SCB_TAILS) >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) mvi DINDIR, SCB_LIST_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) select_out_inc_tid_q:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) bmov SCBPTR, WAITING_TID_HEAD, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) bmov WAITING_TID_HEAD, SCB_NEXT2, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) cmp WAITING_TID_HEAD[1], SCB_LIST_NULL jne . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) mvi WAITING_TID_TAIL[1], SCB_LIST_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) bmov SCBPTR, CURRSCB, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) mvi CLRSINT0, CLRSELDO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) test LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_mode_cleared;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) test LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_mode_cleared;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * If this is a packetized connection, return to our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * idle_loop and let our interrupt handler deal with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * any connection setup/teardown issues. The only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * exceptions are the case of MK_MESSAGE and task management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * SCBs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * On Rev A silicon, the LQO manager transitions to LQOSTOP0 even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * we have selected out with ATN asserted and the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * REQs in a non-packet phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) test SCB_CONTROL, MK_MESSAGE jz select_out_no_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) test SCSISIGO, ATNO jnz select_out_non_packetized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) select_out_no_message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) test LQOSTAT2, LQOSTOP0 jz select_out_non_packetized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) test SCB_TASK_MANAGEMENT, 0xFF jz idle_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) SET_SEQINTCODE(TASKMGMT_FUNC_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) jmp idle_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) select_out_non_packetized:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /* Non packetized request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) and SCSISEQ0, ~ENSELO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * Test to ensure that the bus has not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * already gone free prior to clearing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * any stale busfree status. This avoids
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * a window whereby a busfree just after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * a selection could be missed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) test SCSISIGI, BSYI jz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) mvi CLRSINT1,CLRBUSFREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) or SIMODE1, ENBUSFREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) mov SAVED_SCSIID, SCB_SCSIID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) mov SAVED_LUN, SCB_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) mvi SEQ_FLAGS, NO_CDB_SENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) END_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) or SXFRCTL0, SPIOEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * As soon as we get a successful selection, the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * should go into the message out phase since we have ATN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * asserted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) mvi MSG_OUT, MSG_IDENTIFYFLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * Main loop for information transfer phases. Wait for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * target to assert REQ before checking MSG, C/D and I/O for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * the bus phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) mesgin_phasemis:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ITloop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) call phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) mov A, LASTPHASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) test A, ~P_DATAIN_DT jz p_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) cmp A,P_COMMAND je p_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) cmp A,P_MESGOUT je p_mesgout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) cmp A,P_STATUS je p_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) cmp A,P_MESGIN je p_mesgin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) SET_SEQINTCODE(BAD_PHASE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) jmp ITloop; /* Try reading the bus again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * Command phase. Set up the DMA registers and let 'er rip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) p_command:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) SET_SEQINTCODE(PROTO_VIOLATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) p_command_okay:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) jnz p_command_allocate_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Command retry. Free our current FIFO and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * re-allocate a FIFO so transfer state is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) SET_SRC_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) SET_DST_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) SET_MODE(M_SCSI, M_SCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) p_command_allocate_fifo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) bmov ALLOCFIFO_SCBPTR, SCBPTR, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) call allocate_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) SET_SRC_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) SET_DST_MODE M_DFF1;
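/*
 * CDBs longer than 16 bytes do not fit in the SCB's embedded CDB
 * store and are DMA'd from host memory instead.  The add below
 * sets carry when SCB_CDB_LEN >= 17.
 */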
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) add NONE, -17, SCB_CDB_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) jnc p_command_embedded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) p_command_from_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) bmov HADDR[0], SCB_HOST_CDB_PTR, 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) mvi SG_CACHE_PRE, LAST_SEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) jmp p_command_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) p_command_embedded:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) bmov SHCNT[0], SCB_CDB_LEN, 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) bmov DFDAT, SCB_CDB_STORE, 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) mvi DFCNTRL, SCSIEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) p_command_xfer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) and SEQ_FLAGS, ~NO_CDB_SENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if ((ahd->features & AHD_FAST_CDB_DELIVERY) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * To speed up CDB delivery in Rev B, all CDB acks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * are "released" to the output sync as soon as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * command phase starts. There is only one problem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * with this approach. If the target changes phase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * before all data are sent, we have left over acks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * that can go out on the bus in a data phase. Due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * to other chip constraints, this only happens if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * the target goes to data-in, but if the acks go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * out before we can test SDONE, we'll think that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * the transfer has completed successfully. Work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * around this by taking advantage of the 400ns or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * 800ns dead time between command phase and the REQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * of the new phase. If the transfer has completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * successfully, SCSIEN should fall *long* before we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * see a phase change. We thus treat any phasemiss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * that occurs before SCSIEN falls as an incomplete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) test SSTAT1, PHASEMIS jnz p_command_xfer_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) test DFCNTRL, SCSIEN jnz . - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) test DFCNTRL, SCSIEN jnz .;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * DMA Channel automatically disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * Don't allow a data phase if the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * was not fully transferred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) test SSTAT2, SDONE jnz ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) p_command_xfer_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) or SEQ_FLAGS, NO_CDB_SENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * Status phase. Wait for the data byte to appear, then read it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * and store it into the SCB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) SET_SRC_MODE M_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) SET_DST_MODE M_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) p_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) test SEQ_FLAGS,NOT_IDENTIFIED jnz mesgin_proto_violation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) p_status_okay:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) mov SCB_SCSI_STATUS, SCSIDAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) or SCB_CONTROL, STATUS_RCVD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * identify message sequence and send it to the target. The host may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * override this behavior by setting the MK_MESSAGE bit in the SCB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * control byte. This will cause us to interrupt the host and allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * it to handle the message phase completely on its own. If the bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * associated with this target is set, we will also interrupt the host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * thereby allowing it to send a message on the next selection regardless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * of the transaction being sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * This is done to allow the host to send messages outside of an identify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * sequence while protecting the sequencer from testing the MK_MESSAGE bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * on an SCB that might not be for the current nexus. (For example, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * BDR message in response to a bad reselection would leave us pointed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * an SCB that doesn't have anything to do with the current target).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * bus device reset).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * in case the target decides to put us in this phase for some strange
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * reason.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) p_mesgout_retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /* Turn on ATN for the retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) mvi SCSISIGO, ATNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) p_mesgout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) mov SINDEX, MSG_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) test SCB_CONTROL,MK_MESSAGE jnz host_message_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) p_mesgout_identify:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SCB_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) test SCB_CONTROL, DISCENB jnz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) and SINDEX, ~DISCENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * Send a tag message if TAG_ENB is set in the SCB control block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * Use SCB_NONPACKET_TAG as the tag value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) p_mesgout_tag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) mov SCSIDAT, SINDEX; /* Send the identify message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) call phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) and SCSIDAT,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) call phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) mov SCBPTR jmp p_mesgout_onebyte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * Interrupt the driver, and allow it to handle this message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * phase and any required retries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) p_mesgout_from_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) cmp SINDEX, HOST_MSG jne p_mesgout_onebyte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) jmp host_message_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) p_mesgout_onebyte:
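/* Drop ATN before transmitting the final (or only) message byte. */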
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) mvi CLRSINT1, CLRATNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) mov SCSIDAT, SINDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * If the next bus phase after ATN drops is message out, it means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * that the target is requesting that the last message(s) be resent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) call phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) cmp LASTPHASE, P_MESGOUT je p_mesgout_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) p_mesgout_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) mov LAST_MSG, MSG_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) mvi MSG_OUT, MSG_NOOP; /* No message left */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * Message in phase. Bytes are read using Automatic PIO mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) p_mesgin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* read the 1st message byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) mvi ACCUM call inb_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) test A,MSG_IDENTIFYFLAG jnz mesgin_identify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) cmp A,MSG_DISCONNECT je mesgin_disconnect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) cmp ALLZEROS,A je mesgin_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) cmp A,MSG_NOOP je mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * Pushed message loop to allow the kernel to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * run its own message state engine. To avoid an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * extra nop instruction after signaling the kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * we perform the phase_lock before checking to see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * if we should exit the loop and skip the phase_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * in the ITloop. Performing back to back phase_locks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * shouldn't hurt, but why do it twice...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) host_message_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) call phase_lock; /* Benign the first time through. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) SET_SEQINTCODE(HOST_MSG_LOOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) cmp RETURN_1, EXIT_MSG_LOOP je ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) cmp RETURN_1, CONT_MSG_LOOP_WRITE jne . + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) mov SCSIDAT, RETURN_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) jmp host_message_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /* Must be CONT_MSG_LOOP_READ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) mov NONE, SCSIDAT; /* ACK Byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) jmp host_message_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) mesgin_ign_wide_residue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) mov SAVED_MODE, MODE_PTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) SET_MODE(M_SCSI, M_SCSI)
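/*
 * NEGOADDR selects this target's transfer negotiation entry.  If
 * wide transfers were never negotiated with this target, an
 * IGNORE WIDE RESIDUE message is meaningless, so reject it.
 */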
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) shr NEGOADDR, 4, SAVED_SCSIID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) mov A, NEGCONOPTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) RESTORE_MODE(SAVED_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) test A, WIDEXFER jz mesgin_reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /* Pull the residue byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) mvi REG0 call inb_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) cmp REG0, 0x01 jne mesgin_reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jnz mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) SET_SEQINTCODE(IGN_WIDE_RES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) jmp mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) mesgin_proto_violation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) SET_SEQINTCODE(PROTO_VIOLATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) jmp mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) mesgin_reject:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) mvi MSG_MESSAGE_REJECT call mk_mesg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) mesgin_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) mov NONE,SCSIDAT; /*dummy read from latch to ACK*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #define INDEX_DISC_LIST(scsiid, lun) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) and A, 0xC0, scsiid; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) or SCBPTR, A, lun; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) clr SCBPTR[1]; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) and SINDEX, 0x30, scsiid; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) shr SINDEX, 3; /* Multiply by 2 */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) add SINDEX, (SCB_DISCONNECTED_LISTS & 0xFF); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) mvi SINDEX[1], ((SCB_DISCONNECTED_LISTS >> 8) & 0xFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) mesgin_identify:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * Determine whether a target is using tagged or non-tagged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * transactions by first looking at the transaction stored in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * the per-device, disconnected array. If there is no untagged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * transaction for this target, this must be a tagged transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) bmov DINDEX, SINDEX, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) bmov REG0, SINDIR, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) cmp REG0[1], SCB_LIST_NULL je snoop_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /* Untagged. Clear the busy table entry and setup the SCB. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) bmov DINDIR, ALLONES, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) bmov SCBPTR, REG0, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) jmp setup_SCB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * If we get one, we use the tag returned to find the proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * SCB. After receiving the tag, look for the SCB at SCB locations tag and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * tag + 256.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) snoop_tag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) or SEQ_FLAGS, 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) mov NONE, SCSIDAT; /* ACK Identify MSG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) call phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) or SEQ_FLAGS, 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) cmp LASTPHASE, P_MESGIN jne not_found_ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) or SEQ_FLAGS, 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) cmp SCSIBUS, MSG_SIMPLE_Q_TAG jne not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) get_tag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) clr SCBPTR[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) mvi SCBPTR call inb_next; /* tag value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) verify_scb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) test SCB_CONTROL,DISCONNECTED jz verify_other_scb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) mov A, SAVED_SCSIID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) cmp SCB_SCSIID, A jne verify_other_scb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) mov A, SAVED_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) cmp SCB_LUN, A je setup_SCB_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) verify_other_scb:
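/* Toggle the high byte of SCBPTR to check the companion location at tag ^ 0x100. */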
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) xor SCBPTR[1], 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) test SCBPTR[1], 0xFF jnz verify_scb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) jmp not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * Ensure that the SCB the tag points to is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * an SCB transaction to the reconnecting target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) setup_SCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) or SEQ_FLAGS, 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) test SCB_CONTROL,DISCONNECTED jz not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) setup_SCB_disconnected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) and SCB_CONTROL,~DISCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) clr SEQ_FLAGS; /* make note of IDENTIFY */
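/* Skip FIFO allocation when the SCB has no data to transfer (SG_LIST_NULL). */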
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) test SCB_SGPTR, SG_LIST_NULL jnz . + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) bmov ALLOCFIFO_SCBPTR, SCBPTR, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) call allocate_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* See if the host wants to send a message upon reconnection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) test SCB_CONTROL, MK_MESSAGE jz mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) mvi HOST_MSG call mk_mesg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) jmp mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) not_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) SET_SEQINTCODE(NO_MATCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) jmp mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) not_found_ITloop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) SET_SEQINTCODE(NO_MATCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * We received a "command complete" message. Put the SCB on the complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * queue and trigger a completion interrupt via the idle loop. Before doing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * so, check to see if there is a residual or the status byte is something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * other than STATUS_GOOD (0). In either of these conditions, we upload the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * SCB back to the host so it can process this information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) mesgin_complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * If ATN is raised, we still want to give the target a message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * Perhaps there was a parity error on this last message byte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * Either way, the target should take us to message out phase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * and then attempt to complete the command again. We should use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * critical section here to guard against a timeout triggering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * for this command and setting ATN while we are still processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * the completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) test SCSISIGI, ATNI jnz mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * If we are identified and have successfully sent the CDB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * any status will do. Optimize this fast path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * If the target never sent an identify message but instead went
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * to mesgin to give an invalid message, let the host abort us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * If we received good status but never successfully sent the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * CDB, abort the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) test SCB_SCSI_STATUS,0xff jnz complete_accepted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) complete_accepted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * See if we attempted to deliver a message but the target ignored us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) test SCB_CONTROL, MK_MESSAGE jz complete_nomsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) SET_SEQINTCODE(MKMSG_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) complete_nomsg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) call queue_scb_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) jmp await_busfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) BEGIN_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) freeze_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /* Cancel any pending select-out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) test SSTAT0, SELDO|SELINGO jnz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) and SCSISEQ0, ~ENSELO;
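/*
 * QFREEZE_COUNT is a two-byte counter.  Preserve the accumulator,
 * then propagate the carry from the low-byte add into the high
 * byte with adc.
 */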
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) mov ACCUM_SAVE, A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) clr A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) add QFREEZE_COUNT, 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) adc QFREEZE_COUNT[1], A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) or SEQ_FLAGS2, SELECTOUT_QFROZEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) mov A, ACCUM_SAVE ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) END_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * Complete the current FIFO's SCB if data for this same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * SCB is not transferring in the other FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) SET_SRC_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) SET_DST_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) pkt_complete_scb_if_fifos_idle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) bmov ARG_1, SCBPTR, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) mvi DFFSXFRCTL, CLRCHN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) SET_MODE(M_SCSI, M_SCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) bmov SCBPTR, ARG_1, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) test SCB_FIFO_USE_COUNT, 0xFF jnz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) queue_scb_completion:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) test SCB_SCSI_STATUS,0xff jnz bad_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * Check for residuals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) test SCB_SGPTR, SG_LIST_NULL jnz complete; /* No xfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) BEGIN_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) END_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) bad_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) cmp SCB_SCSI_STATUS, STATUS_PKT_SENSE je upload_scb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) call freeze_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) upload_scb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * Restore SCB TAG since we reuse this field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * in the sequencer. We don't want to corrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * it on the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) bmov SCB_TAG, SCBPTR, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) BEGIN_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) or SCB_SGPTR, SG_STATUS_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne add_dma_scb_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) bmov COMPLETE_DMA_SCB_HEAD, SCBPTR, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) bmov COMPLETE_DMA_SCB_TAIL, SCBPTR, 2 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) add_dma_scb_tail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) bmov REG0, SCBPTR, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) bmov SCBPTR, COMPLETE_DMA_SCB_TAIL, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) bmov SCB_NEXT_COMPLETE, REG0, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) bmov COMPLETE_DMA_SCB_TAIL, REG0, 2 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) END_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * Is it a disconnect message? Set a flag in the SCB to remind us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * and await the bus going free. If this is an untagged transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * store the SCB id for it in our untagged target table for lookup on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * a reselection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) mesgin_disconnect:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * If ATN is raised, we still want to give the target a message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * Perhaps there was a parity error on this last message byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * or we want to abort this command. Either way, the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * should take us to message out phase and then attempt to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * disconnect again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * XXX - Wait for more testing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) test SCSISIGI, ATNI jnz mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) jnz mesgin_proto_violation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) or SCB_CONTROL,DISCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) test SCB_CONTROL, TAG_ENB jnz await_busfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) queue_disc_scb:
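/*
 * Record this SCB in the per target/lun disconnected list slot so
 * an untagged reselection can locate it (see mesgin_identify).
 */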
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) bmov REG0, SCBPTR, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) bmov DINDEX, SINDEX, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) bmov DINDIR, REG0, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) bmov SCBPTR, REG0, 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* FALLTHROUGH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) await_busfree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) and SIMODE1, ~ENBUSFREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * In the BUSFREEREV_BUG case, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * busfree status was cleared at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * beginning of the connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) mvi CLRSINT1,CLRBUSFREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) mov NONE, SCSIDAT; /* Ack the last byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) jnz await_busfree_not_m_dff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) SET_SRC_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) SET_DST_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) await_busfree_clrchn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) mvi DFFSXFRCTL, CLRCHN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) await_busfree_not_m_dff:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* clear target specific flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) test SSTAT1,REQINIT|BUSFREE jz .;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * We only set BUSFREE status once either a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * phase has been detected or we are really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * BUSFREE. This allows the driver to know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * that we are active on the bus even though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * no identified transaction exists should a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * timeout occur while awaiting busfree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) mvi LASTPHASE, P_BUSFREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) test SSTAT1, BUSFREE jnz idle_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) SET_SEQINTCODE(MISSED_BUSFREE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * Save data pointers message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * Copying RAM values back to SCB, for Save Data Pointers message, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * only if we've actually been into a data phase to change them. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * protects against bogus data in scratch ram and the residual counts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * since they are only initialized when we go into data_in or data_out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * Ack the message as soon as possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) SET_SRC_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) SET_DST_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) mesgin_sdptrs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) mov NONE,SCSIDAT; /*dummy read from latch to ACK*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) test SEQ_FLAGS, DPHASE jz ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) call save_pointers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) save_pointers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * If we are asked to save our position at the end of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * transfer, just mark us at the end rather than perform a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * full save.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz save_pointers_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) or SCB_SGPTR, SG_LIST_NULL ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) save_pointers_full:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * The SCB_DATAPTR becomes the current SHADDR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * All other information comes directly from our residual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) bmov SCB_DATAPTR, SHADDR, 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * Restore pointers message? Data pointers are recopied from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * SCB anytime we enter a data phase for the first time, so all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * we need to do is clear the DPHASE flag and let the data phase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * code do the rest. We also reset/reallocate the FIFO to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * sure we have a clean start for the next data or command phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) mesgin_rdptrs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) and SEQ_FLAGS, ~DPHASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz msgin_rdptrs_get_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) SET_MODE(M_SCSI, M_SCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) msgin_rdptrs_get_fifo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) call allocate_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) jmp mesgin_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) phase_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * Don't ignore persistent REQ assertions just because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * they were asserted within the bus settle delay window.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * This allows us to tolerate devices like the GEM318
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * that violate the SCSI spec. We are careful not to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * count REQ while we are waiting for it to fall during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * an async phase due to our asserted ACK. Each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * sequencer instruction takes ~25ns, so the REQ must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * last at least 100ns in order to be counted as a true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * REQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) test SCSIPHASE, 0xFF jnz phase_locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) test SCSISIGI, ACKI jnz phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) test SCSISIGI, REQI jz phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) test SCSIPHASE, 0xFF jnz phase_locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) test SCSISIGI, ACKI jnz phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) test SCSISIGI, REQI jz phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) phase_locked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) test SCSIPHASE, 0xFF jz .;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) test SSTAT1, SCSIPERR jnz phase_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) phase_lock_latch_phase:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) and LASTPHASE, PHASE_MASK, SCSISIGI ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * Functions to read data in Automatic PIO mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * An ACK is not sent on input from the target until SCSIDATL is read from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * So we wait until SCSIDATL is latched (the usual way), then read the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * byte directly off the bus using SCSIBUSL. When we have pulled the ATN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * line, or we just want to acknowledge the byte, then we do a dummy read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * from SCSIDATL. The SCSI spec guarantees that the target will hold the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * data byte on the bus until we send our ACK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) * The assumption here is that these are called in a particular sequence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * and that REQ is already set when inb_first is called. inb_{first,next}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * use the same calling convention as inb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) inb_next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) mov NONE,SCSIDAT; /*dummy read from latch to ACK*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) inb_next_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * If there is a parity error, wait for the kernel to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * see the interrupt and prepare our message response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * before continuing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) test SCSIPHASE, 0xFF jz .;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) test SSTAT1, SCSIPERR jnz inb_next_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) inb_next_check_phase:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) and LASTPHASE, PHASE_MASK, SCSISIGI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) cmp LASTPHASE, P_MESGIN jne mesgin_phasemis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) inb_first:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) clr DINDEX[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) mov DINDEX,SINDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) mov DINDIR,SCSIBUS ret; /*read byte directly from bus*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) inb_last:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) mov NONE,SCSIDAT ret; /*dummy read from latch to ACK*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) mk_mesg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) mvi SCSISIGO, ATNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) mov MSG_OUT,SINDEX ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) SET_SRC_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) SET_DST_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) disable_ccsgen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) test SG_STATE, FETCH_INPROG jz disable_ccsgen_fetch_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) clr CCSGCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) disable_ccsgen_fetch_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) clr SG_STATE ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) service_fifo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * Do we have any prefetch left???
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) test SG_STATE, SEGS_AVAIL jnz idle_sg_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * Can this FIFO have access to the S/G cache yet?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) test CCSGCTL, SG_CACHE_AVAIL jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) /* Did we just finish fetching segs? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) test CCSGCTL, CCSGDONE jnz idle_sgfetch_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) /* Are we actively fetching segments? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) test CCSGCTL, CCSGENACK jnz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) * Should the other FIFO get the S/G cache first? If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * both FIFOs have been allocated since we last checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * any FIFO, it is important that we service a FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * that is not actively on the bus first. This guarantees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) * that a FIFO will be freed to handle snapshot requests for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) * any FIFO that is still on the bus. Chips with RTI do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) * perform snapshots, so don't bother with this test there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if ((ahd->features & AHD_RTI) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * If we're not still receiving SCSI data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * it is safe to allocate the S/G cache to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * this FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) test DFCNTRL, SCSIEN jz idle_sgfetch_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) * Switch to the other FIFO. Non-RTI chips
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * also have the "set mode" bug, so we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * disable interrupts during the switch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) mvi SEQINTCTL, INTVEC1DSL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * If the other FIFO needs loading, then it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * must not have claimed the S/G cache yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * (SG_CACHE_AVAIL would have been cleared in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * the original FIFO mode and we test this above).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * Return to the idle loop so we can process the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * FIFO not currently on the bus first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) test SG_STATE, LOADING_NEEDED jz idle_sgfetch_okay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) clr SEQINTCTL ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) idle_sgfetch_okay:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) clr SEQINTCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) idle_sgfetch_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * We fetch a "cacheline aligned" and sized amount of data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * so we don't end up referencing a non-existent page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * Cacheline aligned is in quotes because the kernel will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * set the prefetch amount to a reasonable level if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * cacheline size is unknown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) bmov SGHADDR, SCB_RESIDUAL_SGPTR, 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) mvi SGHCNT, SG_PREFETCH_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * Need two instructions between "touches" of SGHADDR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) and SGHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) mvi CCSGCTL, CCSGEN|CCSGRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) or SG_STATE, FETCH_INPROG ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) idle_sgfetch_complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * Guard against SG_CACHE_AVAIL activating during sg fetch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * request in the other FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) test SG_STATE, FETCH_INPROG jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) clr CCSGCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) mvi SG_STATE, SEGS_AVAIL|LOADING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) idle_sg_avail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /* Does the hardware have space for another SG entry? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) test DFSTATUS, PRELOAD_AVAIL jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * On the A, preloading a segment before HDMAENACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * comes true can clobber the shadow address of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * first segment in the S/G FIFO. Wait until it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * safe to proceed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) test DFCNTRL, HDMAENACK jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) bmov HADDR, CCSGRAM, 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) bmov HADDR, CCSGRAM, 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) bmov HCNT, CCSGRAM, 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) and HADDR[4], SG_HIGH_ADDR_BITS, SCB_RESIDUAL_DATACNT[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /* Skip 4 bytes of pad. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) add CCSGADDR, 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) sg_advance:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) clr A; /* add sizeof(struct scatter) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) adc SCB_RESIDUAL_SGPTR[1],A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) adc SCB_RESIDUAL_SGPTR[2],A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) adc SCB_RESIDUAL_SGPTR[3],A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) mov SINDEX, SCB_RESIDUAL_SGPTR[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) or SINDEX, LAST_SEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) clr SG_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) mov SG_CACHE_PRE, SINDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * Use SCSIENWRDIS so that SCSIEN is never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * modified by this operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) or DFCNTRL, PRELOADEN|HDMAEN|SCSIENWRDIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) or DFCNTRL, PRELOADEN|HDMAEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) * Do we have another segment in the cache?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) add NONE, SG_PREFETCH_CNT_LIMIT, CCSGADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) jnc return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) and SG_STATE, ~SEGS_AVAIL ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * Initialize the DMA address and counter from the SCB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) load_first_seg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) bmov HADDR, SCB_DATAPTR, 11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) and REG_ISR, ~SG_FULL_RESID, SCB_SGPTR[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) test SCB_DATACNT[3], SG_LAST_SEG jz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) or REG_ISR, LAST_SEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) mov SG_CACHE_PRE, REG_ISR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) * Since we've are entering a data phase, we will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) * rely on the SCB_RESID* fields. Initialize the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * residual and clear the full residual flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) and SCB_SGPTR[0], ~SG_FULL_RESID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /* If we need more S/G elements, tell the idle loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) mvi SG_STATE, LOADING_NEEDED ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) clr SG_STATE ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) p_data_handle_xfer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) call setjmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) test SG_STATE, LOADING_NEEDED jnz service_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) p_data_clear_handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) or LONGJMP_ADDR[1], INVALID_ADDR ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) p_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) SET_SEQINTCODE(PROTO_VIOLATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) p_data_allowed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) test SEQ_FLAGS, DPHASE jz data_phase_initialize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * If we re-enter the data phase after going through another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * phase, our transfer location has almost certainly been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * corrupted by the interveining, non-data, transfers. Ask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * the host driver to fix us up based on the transfer residual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * unless we already know that we should be bitbucketing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) SET_SEQINTCODE(PDATA_REINIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) jmp data_phase_inbounds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) p_data_bitbucket:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * Turn on `Bit Bucket' mode, wait until the target takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * us to another phase, and then notify the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) mov SAVED_MODE, MODE_PTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) jnz bitbucket_not_m_dff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * Ensure that any FIFO contents are cleared out and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * FIFO free'd prior to starting the BITBUCKET. BITBUCKET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * doesn't discard data already in the FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) SET_MODE(M_SCSI, M_SCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) bitbucket_not_m_dff:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) or SXFRCTL1,BITBUCKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) /* Wait for non-data phase. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) test SCSIPHASE, ~DATA_PHASE_MASK jz .;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) and SXFRCTL1, ~BITBUCKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) RESTORE_MODE(SAVED_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) SET_SRC_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) SET_DST_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) SET_SEQINTCODE(DATA_OVERRUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) data_phase_initialize:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) test SCB_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) call load_first_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) data_phase_inbounds:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) /* We have seen a data phase at least once. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) or SEQ_FLAGS, DPHASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) mov SAVED_MODE, MODE_PTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) test SG_STATE, LOADING_NEEDED jz data_group_dma_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) call p_data_handle_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) data_group_dma_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * The transfer is complete if either the last segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) * completes or the target changes phase. Both conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) * will clear SCSIEN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) call idle_loop_service_fifos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) call idle_loop_cchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) call idle_loop_gsfifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) RESTORE_MODE(SAVED_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) test DFCNTRL, SCSIEN jnz data_group_dma_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) data_group_dmafinish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * The transfer has terminated either due to a phase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * change, and/or the completion of the last segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * We have two goals here. Do as much other work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * as possible while the data fifo drains on a read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * and respond as quickly as possible to the standard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * messages (save data pointers/disconnect and command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * complete) that usually follow a data phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) call calc_residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) * Go ahead and shut down the DMA engine now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) test DFCNTRL, DIRECTION jnz data_phase_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) data_group_fifoflush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) or DFCNTRL, FIFOFLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * We have enabled the auto-ack feature. This means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * that the controller may have already transferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * some overrun bytes into the data FIFO and acked them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * on the bus. The only way to detect this situation is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) * to wait for LAST_SEG_DONE to come true on a completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * transfer and then test to see if the data FIFO is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * non-empty. We know there is more data yet to transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * if SG_LIST_NULL is not yet set, thus there cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * an overrun.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) test SG_CACHE_SHADOW, LAST_SEG_DONE jz .;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) test DFSTATUS, FIFOEMP jnz data_phase_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /* Overrun */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) jmp p_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) data_phase_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * If the target has left us in data phase, loop through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * the dma code again. We will only loop if there is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * data overrun.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if ((ahd->flags & AHD_TARGETROLE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) test SSTAT0, TARGET jnz data_phase_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if ((ahd->flags & AHD_INITIATORROLE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) test SSTAT1, REQINIT jz .;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) test SCSIPHASE, DATA_PHASE_MASK jnz p_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) data_phase_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) /* Kill off any pending prefetch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) call disable_ccsgen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) or LONGJMP_ADDR[1], INVALID_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if ((ahd->flags & AHD_TARGETROLE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) test SEQ_FLAGS, DPHASE_PENDING jz ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) and SEQ_FLAGS, ~DPHASE_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) * For data-in phases, wait for any pending acks from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) * initiator before changing phase. We only need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) * send Ignore Wide Residue messages for data-in phases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) test DFCNTRL, DIRECTION jz target_ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) test SSTAT1, REQINIT jnz .;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jz target_ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) SET_MODE(M_SCSI, M_SCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) test NEGCONOPTS, WIDEXFER jz target_ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * Issue an Ignore Wide Residue Message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) mvi P_MESGIN|BSYO call change_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) mvi MSG_IGN_WIDE_RESIDUE call target_outb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) mvi 1 call target_outb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) jmp target_ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * We assume that, even though data may still be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * transferring to the host, that the SCSI side of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * the DMA engine is now in a static state. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * allows us to update our notion of where we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * in this transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * If, by chance, we stopped before being able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * to fetch additional segments for this transfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * yet the last S/G was completely exhausted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * call our idle loop until it is able to load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * another segment. This will allow us to immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * pickup on the next segment on the next data phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * If we happened to stop on the last segment, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * our residual information is still correct from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * the idle loop and there is no need to perform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * any fixups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) residual_before_last_seg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) test MDFFSTAT, SHVALID jnz sgptr_fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * Can never happen from an interrupt as the packetized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * hardware will only interrupt us once SHVALID or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * LAST_SEG_DONE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) call idle_loop_service_fifos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) RESTORE_MODE(SAVED_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* FALLTHROUGH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) calc_residual:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) test SG_CACHE_SHADOW, LAST_SEG jz residual_before_last_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /* Record if we've consumed all S/G entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) test MDFFSTAT, SHVALID jz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) sgptr_fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) * Fixup the residual next S/G pointer. The S/G preload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * feature of the chip allows us to load two elements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * in addition to the currently active element. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * store the bottom byte of the next S/G pointer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * the SG_CACHE_PTR register so we can restore the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * correct value when the DMA completes. If the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * sg ptr value has advanced to the point where higher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * bytes in the address have been affected, fix them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) * too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) add SCB_RESIDUAL_SGPTR[1], -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) adc SCB_RESIDUAL_SGPTR[2], -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) adc SCB_RESIDUAL_SGPTR[3], -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) sgptr_fixup_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) clr SCB_RESIDUAL_DATACNT[3]; /* We are not the last seg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) export timer_isr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) call issue_cmdcmplt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * In H2A4, the mode pointer is not saved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * for intvec2, but is restored on iret.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) * This can lead to the restoration of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * bogus mode ptr. Manually clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) * intmask bits and do a normal return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * to compensate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) and SEQINTCTL, ~(INTMASK2|INTMASK1) ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) or SEQINTCTL, IRET ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) export seq_isr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if ((ahd->features & AHD_RTI) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * On RevA Silicon, if the target returns us to data-out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * after we have already trained for data-out, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * possible for us to transition the free running clock to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * data-valid before the required 100ns P1 setup time (8 P1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * assertions in fast-160 mode). This will only happen if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * this L-Q is a continuation of a data transfer for which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * we have already prefetched data into our FIFO (LQ/Data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * followed by LQ/Data for the same write transaction).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * This can cause some target implementations to miss the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * first few data transfers on the bus. We detect this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) * situation by noticing that this is the first data transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) * after an LQ (LQIWORKONLQ true), that the data transfer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * a continuation of a transfer already setup in our FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) * (SAVEPTRS interrupt), and that the transaction is a write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * (DIRECTION set in DFCNTRL). The delay is performed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * disabling SCSIEN until we see the first REQ from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * First instruction in an ISR cannot be a branch on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * Rev A. Snapshot LQISTAT2 so the status is not missed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * and deffer the test by one instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) mov REG_ISR, LQISTAT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) test REG_ISR, LQIWORKONLQ jz main_isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) test SEQINTSRC, SAVEPTRS jz main_isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) test LONGJMP_ADDR[1], INVALID_ADDR jz saveptr_active_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * Switch to the active FIFO after clearing the snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * savepointer in the current FIFO. We do this so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * a pending CTXTDONE or SAVEPTR is visible in the active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) * FIFO. This status is the only way we can detect if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * have lost the race (e.g. host paused us) and our attempts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * to disable the channel occurred after all REQs were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * already seen and acked (REQINIT never comes true).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) mvi DFFSXFRCTL, CLRCHN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) test DFCNTRL, DIRECTION jz interrupt_return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) and DFCNTRL, ~SCSIEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) snapshot_wait_data_valid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) test SEQINTSRC, (CTXTDONE|SAVEPTRS) jnz interrupt_return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) test SSTAT1, REQINIT jz snapshot_wait_data_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) snapshot_data_valid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) or DFCNTRL, SCSIEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) or SEQINTCTL, IRET ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) snapshot_saveptr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) mvi DFFSXFRCTL, CLRCHN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) or SEQINTCTL, IRET ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) main_isr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) test SEQINTSRC, CFG4DATA jnz cfg4data_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) test SEQINTSRC, CFG4ISTAT jnz cfg4istat_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) test SEQINTSRC, SAVEPTRS jnz saveptr_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) test SEQINTSRC, CFG4ICMD jnz cfg4icmd_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) SET_SEQINTCODE(INVALID_SEQINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) * There are two types of save pointers interrupts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) * The first is a snapshot save pointers where the current FIFO is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * active and contains a snapshot of the current poniter information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * This happens between packets in a stream for a single L_Q. Since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * are not performing a pointer save, we can safely clear the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * so it can be used for other transactions. On RTI capable controllers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * where snapshots can, and are, disabled, the code to handle this type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * of snapshot is not active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * The second case is a save pointers on an active FIFO which occurs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * if the target changes to a new L_Q or busfrees/QASes and the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * has a residual. This should occur coincident with a ctxtdone. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * disable the interrupt and allow our active routine to handle the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * save.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) saveptr_intr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if ((ahd->features & AHD_RTI) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) test LONGJMP_ADDR[1], INVALID_ADDR jnz snapshot_saveptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) saveptr_active_fifo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) and SEQIMODE, ~ENSAVEPTRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) or SEQINTCTL, IRET ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) cfg4data_intr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) test SCB_SGPTR[0], SG_LIST_NULL jnz pkt_handle_overrun_inc_use_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) call load_first_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) call pkt_handle_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) inc SCB_FIFO_USE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) interrupt_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) or SEQINTCTL, IRET ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) cfg4istat_intr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) call freeze_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) add NONE, -13, SCB_CDB_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) jnc cfg4istat_have_sense_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) test SCB_CDB_LEN, SCB_CDB_LEN_PTR jnz cfg4istat_have_sense_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * Host sets up address/count and enables transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) SET_SEQINTCODE(CFG4ISTAT_INTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) jmp cfg4istat_setup_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) cfg4istat_have_sense_addr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) bmov HADDR, SCB_SENSE_BUSADDR, 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) mvi HCNT[1], (AHD_SENSE_BUFSIZE >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) mvi SG_CACHE_PRE, LAST_SEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) mvi DFCNTRL, PRELOADEN|SCSIEN|HDMAEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) cfg4istat_setup_handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * Status pkt is transferring to host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * Wait in idle loop for transfer to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * If a command completed before an attempted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * task management function completed, notify the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) test SCB_TASK_MANAGEMENT, 0xFF jz cfg4istat_no_taskmgmt_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) cfg4istat_no_taskmgmt_func:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) call pkt_handle_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) or SEQINTCTL, IRET ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) cfg4icmd_intr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) * In the case of DMAing a CDB from the host, the normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) * CDB buffer is formatted with an 8 byte address followed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * by a 1 byte count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) bmov HADDR[0], SCB_HOST_CDB_PTR, 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) mvi SG_CACHE_PRE, LAST_SEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) call pkt_handle_cdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) or SEQINTCTL, IRET ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * See if the target has gone on in this context creating an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * overrun condition. For the write case, the hardware cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * ack bytes until data are provided. So, if the target begins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * another packet without changing contexts, implying we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * not sitting on a packet boundary, we are in an overrun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * situation. For the read case, the hardware will continue to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * ack bytes into the FIFO, and may even ack the last overrun packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * into the FIFO. If the FIFO should become non-empty, we are in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * a read overrun case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) #define check_overrun \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /* Not on a packet boundary. */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) test MDFFSTAT, DLZERO jz pkt_handle_overrun; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) test DFSTATUS, FIFOEMP jz pkt_handle_overrun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) pkt_handle_xfer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) test SG_STATE, LOADING_NEEDED jz pkt_last_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) call setjmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) test SCSISIGO, ATNO jnz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) test SSTAT2, NONPACKREQ jz pkt_service_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * Defer handling of this NONPACKREQ until we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * can be sure it pertains to this FIFO. SAVEPTRS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * will not be asserted if the NONPACKREQ is for us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) * so we must simulate it if shadow is valid. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * shadow is not valid, keep running this FIFO until we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * have satisfied the transfer by loading segments and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * waiting for either shadow valid or last_seg_done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) test MDFFSTAT, SHVALID jnz pkt_saveptrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) pkt_service_fifo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) test SG_STATE, LOADING_NEEDED jnz service_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) pkt_last_seg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) call setjmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_last_seg_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) test SCSISIGO, ATNO jnz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) test SSTAT2, NONPACKREQ jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) test MDFFSTAT, SHVALID jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /* FALLTHROUGH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * Either a SAVEPTRS interrupt condition is pending for this FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * or we have a pending NONPACKREQ for this FIFO. We differentiate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) * between the two by capturing the state of the SAVEPTRS interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * prior to clearing this status and executing the common code for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * these two cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) pkt_saveptrs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) BEGIN_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) or DFCNTRL, FIFOFLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) mov REG0, SEQINTSRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) call calc_residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) call save_pointers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) mvi CLRSEQINTSRC, CLRSAVEPTRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) call disable_ccsgen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) or SEQIMODE, ENSAVEPTRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) test DFCNTRL, DIRECTION jnz pkt_saveptrs_check_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) test DFSTATUS, FIFOEMP jnz pkt_saveptrs_check_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * Keep a handler around for this FIFO until it drains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * to the host to guarantee that we don't complete the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * command to the host before the data arrives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) pkt_saveptrs_wait_fifoemp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) call setjmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) test DFSTATUS, FIFOEMP jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) pkt_saveptrs_check_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) or LONGJMP_ADDR[1], INVALID_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) test REG0, SAVEPTRS jz unexpected_nonpkt_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) dec SCB_FIFO_USE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) mvi DFFSXFRCTL, CLRCHN ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) * LAST_SEG_DONE status has been seen in the current FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) * This indicates that all of the allowed data for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) * command has transferred across the SCSI and host buses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) * Check for overrun and see if we can complete this command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) pkt_last_seg_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * Mark transfer as completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) or SCB_SGPTR, SG_LIST_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * Wait for the current context to finish to verify that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) * no overrun condition has occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) call setjmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) pkt_wait_ctxt_done_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * A sufficiently large overrun or a NONPACKREQ may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * prevent CTXTDONE from ever asserting, so we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * poll for these statuses too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) check_overrun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) test SSTAT2, NONPACKREQ jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) /* FALLTHROUGH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) pkt_ctxt_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) check_overrun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) or LONGJMP_ADDR[1], INVALID_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) * If status has been received, it is safe to skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) * the check to see if another FIFO is active because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * LAST_SEG_DONE has been observed. However, we check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * the FIFO anyway since it costs us only one extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * instruction to leverage common code to perform the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) * SCB completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) dec SCB_FIFO_USE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) mvi DFFSXFRCTL, CLRCHN ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) END_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * Must wait until CDB xfer is over before issuing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * clear channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) pkt_handle_cdb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) call setjmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) test SG_CACHE_SHADOW, LAST_SEG_DONE jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) or LONGJMP_ADDR[1], INVALID_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) mvi DFFSXFRCTL, CLRCHN ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) * Watch over the status transfer. Our host sense buffer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) * large enough to take the maximum allowed status packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) * None-the-less, we must still catch and report overruns to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) * the host. Additionally, properly catch unexpected non-packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) * phases that are typically caused by CRC errors in status packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * transmission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) pkt_handle_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) call setjmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) test SEQINTSRC, CTXTDONE jz pkt_status_check_nonpackreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) pkt_status_IU_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) or DFCNTRL, FIFOFLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) test DFSTATUS, FIFOEMP jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) BEGIN_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) or LONGJMP_ADDR[1], INVALID_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) mvi SCB_SCSI_STATUS, STATUS_PKT_SENSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) or SCB_CONTROL, STATUS_RCVD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) jmp pkt_complete_scb_if_fifos_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) END_CRITICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) pkt_status_check_overrun:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) * Status PKT overruns are uncerimoniously recovered with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * bus reset. If we've overrun, let the host know so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * recovery can be performed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * LAST_SEG_DONE has been observed. If either CTXTDONE or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) * a NONPACKREQ phase change have occurred and the FIFO is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) * empty, there is no overrun.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) test DFSTATUS, FIFOEMP jz pkt_status_report_overrun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) test SEQINTSRC, CTXTDONE jz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) test DFSTATUS, FIFOEMP jnz pkt_status_IU_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) test SCSIPHASE, ~DATA_PHASE_MASK jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) test DFSTATUS, FIFOEMP jnz pkt_status_check_nonpackreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) pkt_status_report_overrun:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) SET_SEQINTCODE(STATUS_OVERRUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) /* SEQUENCER RESTARTED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) pkt_status_check_nonpackreq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * CTXTDONE may be held off if a NONPACKREQ is associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) * the current context. If a NONPACKREQ is observed, decide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) * if it is for the current context. If it is for the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) * context, we must defer NONPACKREQ processing until all data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) * has transferred to the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) test SCSIPHASE, ~DATA_PHASE_MASK jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) test SCSISIGO, ATNO jnz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) test SSTAT2, NONPACKREQ jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) test SEQINTSRC, CTXTDONE jnz pkt_status_IU_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) test DFSTATUS, FIFOEMP jz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * The unexpected nonpkt phase handler assumes that any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * data channel use will have a FIFO reference count. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * turns out that the status handler doesn't need a references
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * count since the status received flag, and thus completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) * processing, cannot be set until the handler is finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) * We increment the count here to make the nonpkt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) * happy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) inc SCB_FIFO_USE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) /* FALLTHROUGH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) * Nonpackreq is a polled status. It can come true in three situations:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * we have received an L_Q, we have sent one or more L_Qs, or there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) * L_Q context associated with this REQ (REQ occurs immediately after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * (re)selection). Routines that know that the context responsible for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * nonpackreq call directly into unexpected_nonpkt_phase. In the case of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * top level idle loop, we exhaust all active contexts prior to determining that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * we simply do not have the full I_T_L_Q for this phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) unexpected_nonpkt_phase_find_ctxt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) * This nonpackreq is most likely associated with one of the tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) * in a FIFO or an outgoing LQ. Only treat it as an I_T only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) * nonpackreq if we've cleared out the FIFOs and handled any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) * pending SELDO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) SET_SRC_MODE M_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) SET_DST_MODE M_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) and A, FIFO1FREE|FIFO0FREE, DFFSTAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) cmp A, FIFO1FREE|FIFO0FREE jne return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) test SSTAT0, SELDO jnz return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) mvi SCBPTR[1], SCB_LIST_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) unexpected_nonpkt_phase:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) jnz unexpected_nonpkt_mode_cleared;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) SET_SRC_MODE M_DFF0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) SET_DST_MODE M_DFF0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) or LONGJMP_ADDR[1], INVALID_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) dec SCB_FIFO_USE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) mvi DFFSXFRCTL, CLRCHN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) unexpected_nonpkt_mode_cleared:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) mvi CLRSINT2, CLRNONPACKREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * Test to ensure that the bus has not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * already gone free prior to clearing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * any stale busfree status. This avoids
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * a window whereby a busfree just after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) * a selection could be missed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) test SCSISIGI, BSYI jz . + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) mvi CLRSINT1,CLRBUSFREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) or SIMODE1, ENBUSFREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) }
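	/*
	 * Only the message phases are acceptable here; any other phase
	 * is reported to the host as an illegal phase.  Otherwise notify
	 * the host that we are entering non-packetized processing and
	 * continue in the standard information transfer loop.
	 */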
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) test SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) SET_SEQINTCODE(ENTERING_NONPACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) illegal_phase:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) SET_SEQINTCODE(ILLEGAL_PHASE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) jmp ITloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * We have entered an overrun situation. If we have a working
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * BITBUCKET (the chip is not affected by AHD_PKT_BITBUCKET_BUG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * enable it and let the hardware discard any overrun data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * Otherwise use a host overrun buffer to simulate BITBUCKET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) */
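/*
 * The _inc_use_count entry point takes a FIFO reference on the SCB before
 * falling into the common overrun handler, for callers that have not
 * already accounted for this FIFO's use of the SCB.
 */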
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) pkt_handle_overrun_inc_use_count:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) inc SCB_FIFO_USE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) pkt_handle_overrun:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) SET_SEQINTCODE(CFG4OVERRUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) call freeze_queue;
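	/*
	 * Arm the overrun sink.  When the bit bucket works, the hardware
	 * simply discards further data (DFFBITBUCKET).  Otherwise the
	 * overrun data is DMA'd into the host overrun buffer described
	 * by load_overrun_buf.
	 */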
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) or DFFSXFRCTL, DFFBITBUCKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) SET_SRC_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) SET_DST_MODE M_DFF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) call load_overrun_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) mvi DFCNTRL, (HDMAEN|SCSIEN|PRELOADEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) call setjmp;
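	/*
	 * setjmp registers the code below as this FIFO's longjmp handler,
	 * so the idle loop re-enters it each time the FIFO is serviced
	 * until the overrun completes.
	 */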
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) test DFSTATUS, PRELOAD_AVAIL jz overrun_load_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) call load_overrun_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) or DFCNTRL, PRELOADEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) overrun_load_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) test SEQINTSRC, CTXTDONE jnz pkt_overrun_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) test DFFSXFRCTL, DFFBITBUCKET jz pkt_overrun_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) test SSTAT2, NONPACKREQ jz return;
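	/*
	 * Record SG_OVERRUN_RESID in the residual S/G pointer so the host
	 * can see that the transfer overran its S/G list.  If the
	 * hardware has not yet signalled CTXTDONE, treat the REQ as an
	 * unexpected non-packetized phase.  Otherwise release the FIFO:
	 * drop the SCB's use count, invalidate the longjmp handler, and
	 * either hand the SCB to completion processing (if status has
	 * already been received) or simply clear the channel.
	 */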
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) pkt_overrun_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) or SCB_RESIDUAL_SGPTR, SG_OVERRUN_RESID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) dec SCB_FIFO_USE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) or LONGJMP_ADDR[1], INVALID_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) mvi DFFSXFRCTL, CLRCHN ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) load_overrun_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) * Load a dummy segment if preload space is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) */
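	/*
	 * Roughly, the byte-wide operations below compute (a sketch,
	 * not firmware code):
	 *
	 *	haddr[31:0]  = SHARED_DATA_ADDR[31:0]
	 *	             + (PKT_OVERRUN_BUFOFFSET << 8);
	 *	haddr[63:32] = 0;
	 *	hcnt         = PKT_OVERRUN_BUFSIZE;	(a multiple of 256)
	 *
	 * PKT_OVERRUN_BUFOFFSET is added to address byte 1, so it is
	 * effectively a 256-byte-granular offset into the shared data
	 * area.  A is cleared so the adc instructions propagate only the
	 * carry into address bytes 2 and 3; the caller's accumulator is
	 * preserved via ACCUM_SAVE.
	 */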
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) mov HADDR[0], SHARED_DATA_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) add HADDR[1], PKT_OVERRUN_BUFOFFSET, SHARED_DATA_ADDR[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) mov ACCUM_SAVE, A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) clr A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) adc HADDR[2], A, SHARED_DATA_ADDR[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) adc HADDR[3], A, SHARED_DATA_ADDR[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) mov A, ACCUM_SAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) bmov HADDR[4], ALLZEROS, 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) /* PKT_OVERRUN_BUFSIZE is a multiple of 256 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) clr HCNT[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) mvi HCNT[1], ((PKT_OVERRUN_BUFSIZE >> 8) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) clr HCNT[2] ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }