^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * linux/drivers/scsi/esas2r/esas2r_init.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * This program is free software; you can redistribute it and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * modify it under the terms of the GNU General Public License
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * as published by the Free Software Foundation; either version 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * of the License, or (at your option) any later version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * This program is distributed in the hope that it will be useful,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * but WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * GNU General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * NO WARRANTY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * solely responsible for determining the appropriateness of using and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * distributing the Program and assumes all risks associated with its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * exercise of rights under this Agreement, including but not limited to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * the risks and costs of program errors, damage to or loss of data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * programs or equipment, and unavailability or interruption of operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * DISCLAIMER OF LIABILITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * You should have received a copy of the GNU General Public License
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * along with this program; if not, write to the Free Software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * USA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include "esas2r.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) struct esas2r_mem_desc *mem_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) u32 align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) mem_desc->esas2r_param = mem_desc->size + align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) mem_desc->virt_addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) mem_desc->phys_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) (size_t)mem_desc->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) esas2r_param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) (dma_addr_t *)&mem_desc->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) if (mem_desc->esas2r_data == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) "failed to allocate %lu bytes of consistent memory!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) (long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) unsigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) int)mem_desc->esas2r_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) memset(mem_desc->virt_addr, 0, mem_desc->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) static void esas2r_initmem_free(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) struct esas2r_mem_desc *mem_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) if (mem_desc->virt_addr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * Careful! phys_addr and virt_addr may have been adjusted from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * original allocation in order to return the desired alignment. That
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * means we have to use the original address (in esas2r_data) and size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * (esas2r_param) and calculate the original physical address based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * the difference between the requested and actual allocation size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) if (mem_desc->phys_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) int unalign = ((u8 *)mem_desc->virt_addr) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) ((u8 *)mem_desc->esas2r_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) dma_free_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) (size_t)mem_desc->esas2r_param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) mem_desc->esas2r_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) (dma_addr_t)(mem_desc->phys_addr - unalign));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) kfree(mem_desc->esas2r_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) mem_desc->virt_addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) static bool alloc_vda_req(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) struct esas2r_request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) struct esas2r_mem_desc *memdesc = kzalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) sizeof(struct esas2r_mem_desc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) if (memdesc == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) esas2r_hdebug("could not alloc mem for vda request memdesc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) memdesc->size = sizeof(union atto_vda_req) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) ESAS2R_DATA_BUF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) if (!esas2r_initmem_alloc(a, memdesc, 256)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) esas2r_hdebug("could not alloc mem for vda request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) kfree(memdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) a->num_vrqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) list_add(&memdesc->next_desc, &a->vrq_mds_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) rq->vrq_md = memdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) rq->vrq->scsi.handle = a->num_vrqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) static void esas2r_unmap_regions(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) if (a->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) iounmap((void __iomem *)a->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) a->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) pci_release_region(a->pcid, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) if (a->data_window)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) iounmap((void __iomem *)a->data_window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) a->data_window = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) pci_release_region(a->pcid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) static int esas2r_map_regions(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) a->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) a->data_window = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) error = pci_request_region(a->pcid, 2, a->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) if (error != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) "pci_request_region(2) failed, error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) pci_resource_len(a->pcid, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) if (a->regs == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) "ioremap failed for regs mem region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) pci_release_region(a->pcid, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) error = pci_request_region(a->pcid, 0, a->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) if (error != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) "pci_request_region(2) failed, error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) esas2r_unmap_regions(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) pci_resource_len(a->pcid, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (a->data_window == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) "ioremap failed for data_window mem region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) esas2r_unmap_regions(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) /* Set up interrupt mode based on the requested value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) switch (intr_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) case INTR_MODE_LEGACY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) use_legacy_interrupts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) a->intr_mode = INTR_MODE_LEGACY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) case INTR_MODE_MSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) i = pci_enable_msi(a->pcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) if (i != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) esas2r_log(ESAS2R_LOG_WARN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) "failed to enable MSI for adapter %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) "falling back to legacy interrupts "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) "(err=%d)", a->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) goto use_legacy_interrupts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) a->intr_mode = INTR_MODE_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) set_bit(AF2_MSI_ENABLED, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) esas2r_log(ESAS2R_LOG_WARN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) "unknown interrupt_mode %d requested, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) "falling back to legacy interrupt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) interrupt_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) goto use_legacy_interrupts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) static void esas2r_claim_interrupts(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (a->intr_mode == INTR_MODE_LEGACY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) flags |= IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) esas2r_log(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) a->pcid->irq, a, a->name, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) if (request_irq(a->pcid->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) (a->intr_mode ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) INTR_MODE_LEGACY) ? esas2r_interrupt :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) esas2r_msi_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) a->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) a)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) a->pcid->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) set_bit(AF2_IRQ_CLAIMED, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) esas2r_log(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) "claimed IRQ %d flags: 0x%lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) a->pcid->irq, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct esas2r_adapter *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) u64 bus_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) void *next_uncached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) struct esas2r_request *first_request, *last_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) bool dma64 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) if (index >= MAX_ADAPTERS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) "tried to init invalid adapter index %u!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (esas2r_adapters[index]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) "tried to init existing adapter index %u!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) a = (struct esas2r_adapter *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) memset(a, 0, sizeof(struct esas2r_adapter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) a->pcid = pcid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) a->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) if (sizeof(dma_addr_t) > 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) dma64 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) esas2r_kill_adapter(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) esas2r_adapters[index] = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) esas2r_debug("new adapter %p, name %s", a, a->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) spin_lock_init(&a->request_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) spin_lock_init(&a->fw_event_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) mutex_init(&a->fm_api_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) mutex_init(&a->fs_api_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) sema_init(&a->nvram_semaphore, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) esas2r_fw_event_off(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) a->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) init_waitqueue_head(&a->buffered_ioctl_waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) init_waitqueue_head(&a->nvram_waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) init_waitqueue_head(&a->fm_api_waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) init_waitqueue_head(&a->fs_api_waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) init_waitqueue_head(&a->vda_waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) INIT_LIST_HEAD(&a->general_req.req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) INIT_LIST_HEAD(&a->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) INIT_LIST_HEAD(&a->defer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) INIT_LIST_HEAD(&a->free_sg_list_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) INIT_LIST_HEAD(&a->avail_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) INIT_LIST_HEAD(&a->vrq_mds_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) INIT_LIST_HEAD(&a->fw_event_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) first_request = (struct esas2r_request *)((u8 *)(a + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) for (last_request = first_request, i = 1; i < num_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) last_request++, i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) INIT_LIST_HEAD(&last_request->req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) list_add_tail(&last_request->comp_list, &a->avail_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) if (!alloc_vda_req(a, last_request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) "failed to allocate a VDA request!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) esas2r_kill_adapter(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) esas2r_debug("requests: %p to %p (%d, %d)", first_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) last_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) sizeof(*first_request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) num_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (esas2r_map_regions(a) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) esas2r_kill_adapter(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) a->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /* interrupts will be disabled until we are done with init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) atomic_inc(&a->dis_ints_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) atomic_inc(&a->disable_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) set_bit(AF_CHPRST_PENDING, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) set_bit(AF_DISC_PENDING, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) set_bit(AF_FIRST_INIT, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) set_bit(AF_LEGACY_SGE_MODE, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) a->init_msg = ESAS2R_INIT_MSG_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) a->max_vdareq_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) a->build_sgl = esas2r_build_sg_list_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) esas2r_setup_interrupts(a, interrupt_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) a->uncached_size = esas2r_get_uncached_size(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) a->uncached = dma_alloc_coherent(&pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) (size_t)a->uncached_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) (dma_addr_t *)&bus_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) if (a->uncached == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) "failed to allocate %d bytes of consistent memory!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) a->uncached_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) esas2r_kill_adapter(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) a->uncached_phys = bus_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) a->uncached_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) a->uncached,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) upper_32_bits(bus_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) lower_32_bits(bus_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) memset(a->uncached, 0, a->uncached_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) next_uncached = a->uncached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) if (!esas2r_init_adapter_struct(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) &next_uncached)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) "failed to initialize adapter structure (2)!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) esas2r_kill_adapter(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) tasklet_init(&a->tasklet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) esas2r_adapter_tasklet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) (unsigned long)a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * Disable chip interrupts to prevent spurious interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * until we claim the IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) esas2r_disable_chip_interrupts(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) esas2r_check_adapter(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) if (!esas2r_init_adapter_hw(a, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) esas2r_debug("esas2r_init_adapter ok");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) esas2r_claim_interrupts(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) esas2r_enable_chip_interrupts(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) set_bit(AF2_INIT_DONE, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) if (!test_bit(AF_DEGRADED_MODE, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) esas2r_kickoff_timer(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) esas2r_debug("esas2r_init_adapter done for %p (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) a, a->disable_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) static void esas2r_adapter_power_down(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) int power_management)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) struct esas2r_mem_desc *memdesc, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if ((test_bit(AF2_INIT_DONE, &a->flags2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) && (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (!power_management) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) del_timer_sync(&a->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) tasklet_kill(&a->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) esas2r_power_down(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) * There are versions of firmware that do not handle the sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * cache command correctly. Stall here to ensure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * cache is lazily flushed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) mdelay(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) esas2r_debug("chip halted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) /* Remove sysfs binary files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (a->sysfs_fw_created) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) a->sysfs_fw_created = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) if (a->sysfs_fs_created) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) a->sysfs_fs_created = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (a->sysfs_vda_created) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) a->sysfs_vda_created = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (a->sysfs_hw_created) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) a->sysfs_hw_created = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (a->sysfs_live_nvram_created) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) sysfs_remove_bin_file(&a->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) &bin_attr_live_nvram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) a->sysfs_live_nvram_created = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) if (a->sysfs_default_nvram_created) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) sysfs_remove_bin_file(&a->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) &bin_attr_default_nvram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) a->sysfs_default_nvram_created = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /* Clean up interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) esas2r_log_dev(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) &(a->pcid->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) "free_irq(%d) called", a->pcid->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) free_irq(a->pcid->irq, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) esas2r_debug("IRQ released");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) pci_disable_msi(a->pcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) clear_bit(AF2_MSI_ENABLED, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) esas2r_debug("MSI disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) if (a->inbound_list_md.virt_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) esas2r_initmem_free(a, &a->inbound_list_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if (a->outbound_list_md.virt_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) esas2r_initmem_free(a, &a->outbound_list_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) next_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) esas2r_initmem_free(a, memdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /* Following frees everything allocated via alloc_vda_req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) esas2r_initmem_free(a, memdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) list_del(&memdesc->next_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) kfree(memdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) kfree(a->first_ae_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) a->first_ae_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) kfree(a->sg_list_mds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) a->sg_list_mds = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) kfree(a->req_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) a->req_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) if (a->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) esas2r_unmap_regions(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) a->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) a->data_window = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) esas2r_debug("regions unmapped");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /* Release/free allocated resources for specified adapters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) void esas2r_kill_adapter(int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) struct esas2r_adapter *a = esas2r_adapters[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) if (a) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) esas2r_debug("killing adapter %p [%d] ", a, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) esas2r_fw_event_off(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) esas2r_adapter_power_down(a, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) if (esas2r_buffered_ioctl &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) (a->pcid == esas2r_buffered_ioctl_pcid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) dma_free_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) (size_t)esas2r_buffered_ioctl_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) esas2r_buffered_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) esas2r_buffered_ioctl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) esas2r_buffered_ioctl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) if (a->vda_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) dma_free_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) (size_t)VDA_MAX_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) a->vda_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) (dma_addr_t)a->ppvda_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) a->vda_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (a->fs_api_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) dma_free_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) (size_t)a->fs_api_buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) a->fs_api_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) (dma_addr_t)a->ppfs_api_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) a->fs_api_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) kfree(a->local_atto_ioctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) a->local_atto_ioctl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) spin_lock_irqsave(&a->fw_event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) wq = a->fw_event_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) a->fw_event_q = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) spin_unlock_irqrestore(&a->fw_event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) if (wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) destroy_workqueue(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) if (a->uncached) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) dma_free_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) (size_t)a->uncached_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) a->uncached,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) (dma_addr_t)a->uncached_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) a->uncached = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) esas2r_debug("uncached area freed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) esas2r_log_dev(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) &(a->pcid->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) "pci_disable_device() called. msix_enabled: %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) "msi_enabled: %d irq: %d pin: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) a->pcid->msix_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) a->pcid->msi_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) a->pcid->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) a->pcid->pin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) esas2r_log_dev(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) &(a->pcid->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) "before pci_disable_device() enable_cnt: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) a->pcid->enable_cnt.counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) pci_disable_device(a->pcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) esas2r_log_dev(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) &(a->pcid->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) "after pci_disable_device() enable_cnt: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) a->pcid->enable_cnt.counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) esas2r_log_dev(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) &(a->pcid->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) "pci_set_drv_data(%p, NULL) called",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) a->pcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) pci_set_drvdata(a->pcid, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) esas2r_adapters[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (test_bit(AF2_INIT_DONE, &a->flags2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) clear_bit(AF2_INIT_DONE, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) set_bit(AF_DEGRADED_MODE, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) esas2r_log_dev(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) &(a->host->shost_gendev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) "scsi_remove_host() called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) scsi_remove_host(a->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) esas2r_log_dev(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) &(a->host->shost_gendev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) "scsi_host_put() called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) scsi_host_put(a->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) struct Scsi_Host *host = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) u32 device_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (!a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) esas2r_adapter_power_down(a, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) device_state = pci_choose_state(pdev, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) "pci_save_state() called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) "pci_disable_device() called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) "pci_set_power_state() called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) pci_set_power_state(pdev, device_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) int esas2r_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) struct Scsi_Host *host = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) int rez;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) "pci_set_power_state(PCI_D0) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) "called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) pci_set_power_state(pdev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) "pci_enable_wake(PCI_D0, 0) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) "called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) pci_enable_wake(pdev, PCI_D0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) "pci_restore_state() called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) pci_restore_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) "pci_enable_device() called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) rez = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) if (!a) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) rez = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) goto error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (esas2r_map_regions(a) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) rez = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) goto error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) /* Set up interupt mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) esas2r_setup_interrupts(a, a->intr_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * Disable chip interrupts to prevent spurious interrupts until we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * claim the IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) esas2r_disable_chip_interrupts(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (!esas2r_power_up(a, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) esas2r_debug("yikes, esas2r_power_up failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) rez = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) goto error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) esas2r_claim_interrupts(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * Now that system interrupt(s) are claimed, we can enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * chip interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) esas2r_enable_chip_interrupts(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) esas2r_kickoff_timer(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) esas2r_debug("yikes, unable to claim IRQ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) rez = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) goto error_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) error_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) rez);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) return rez;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) set_bit(AF_DEGRADED_MODE, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) "setting adapter to degraded mode: %s\n", error_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) return sizeof(struct esas2r_sas_nvram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) + 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) + (num_sg_lists * (u16)sgl_page_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) + ALIGN((num_requests + num_ae_requests + 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ESAS2R_LIST_EXTRA) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) sizeof(struct esas2r_inbound_list_source_entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) + ALIGN((num_requests + num_ae_requests + 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) ESAS2R_LIST_EXTRA) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) sizeof(struct atto_vda_ob_rsp), 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) + 256; /* VDA request and buffer align */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (pci_is_pcie(a->pcid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) u16 devcontrol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) PCI_EXP_DEVCTL_READRQ_512B) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) esas2r_log(ESAS2R_LOG_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) "max read request size > 512B");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) devcontrol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * Determine the organization of the uncached data area and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * finish initializing the adapter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) void **uncached_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) u8 *high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct esas2r_inbound_list_source_entry *element;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct esas2r_request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) struct esas2r_mem_desc *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) spin_lock_init(&a->sg_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) spin_lock_init(&a->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) spin_lock_init(&a->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (!alloc_vda_req(a, &a->general_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) esas2r_hdebug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) "failed to allocate a VDA request for the general req!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /* allocate requests for asynchronous events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) a->first_ae_req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) kcalloc(num_ae_requests, sizeof(struct esas2r_request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (a->first_ae_req == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) "failed to allocate memory for asynchronous events");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /* allocate the S/G list memory descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) a->sg_list_mds = kcalloc(num_sg_lists, sizeof(struct esas2r_mem_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (a->sg_list_mds == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) "failed to allocate memory for s/g list descriptors");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* allocate the request table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) a->req_table =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) kcalloc(num_requests + num_ae_requests + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) sizeof(struct esas2r_request *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (a->req_table == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) esas2r_log(ESAS2R_LOG_CRIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) "failed to allocate memory for the request table");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* initialize PCI configuration space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) esas2r_init_pci_cfg_space(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * the thunder_stream boards all have a serial flash part that has a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * different base address on the AHB bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) a->flags2 |= AF2_THUNDERBOLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (test_bit(AF2_THUNDERBOLT, &a->flags2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) a->flags2 |= AF2_SERIAL_FLASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (a->pcid->subsystem_device == ATTO_TLSH_1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) a->flags2 |= AF2_THUNDERLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* Uncached Area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) high = (u8 *)*uncached_area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* initialize the scatter/gather table pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) sgl->size = sgl_page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* Allow the driver to load if the minimum count met. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (i < NUM_SGL_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* compute the size of the lists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) a->list_size = num_requests + ESAS2R_LIST_EXTRA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* allocate the inbound list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) a->inbound_list_md.size = a->list_size *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) sizeof(struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) esas2r_inbound_list_source_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) esas2r_hdebug("failed to allocate IB list");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* allocate the outbound list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) a->outbound_list_md.size = a->list_size *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) sizeof(struct atto_vda_ob_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) ESAS2R_LIST_ALIGN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) esas2r_hdebug("failed to allocate IB list");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /* allocate the NVRAM structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) a->nvram = (struct esas2r_sas_nvram *)high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) high += sizeof(struct esas2r_sas_nvram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* allocate the discovery buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) a->disc_buffer = high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) high += ESAS2R_DISC_BUF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) high = PTR_ALIGN(high, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* allocate the outbound list copy pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) a->outbound_copy = (u32 volatile *)high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) high += sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!test_bit(AF_NVR_VALID, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) esas2r_nvram_set_defaults(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* update the caller's uncached memory area pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) *uncached_area = (void *)high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* initialize the allocated memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (test_bit(AF_FIRST_INIT, &a->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) esas2r_targ_db_initialize(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* prime parts of the inbound list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) element =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) (struct esas2r_inbound_list_source_entry *)a->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) inbound_list_md.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) for (i = 0; i < a->list_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) element->address = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) element->reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) element->length = cpu_to_le32(HWILSE_INTERFACE_F0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) | (sizeof(union
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) atto_vda_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) sizeof(u32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) element++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* init the AE requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) INIT_LIST_HEAD(&rq->req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (!alloc_vda_req(a, rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) esas2r_hdebug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) "failed to allocate a VDA request!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) esas2r_rq_init_request(rq, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* override the completion function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) rq->comp_cb = esas2r_ae_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /* This code will verify that the chip is operational. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) bool esas2r_check_adapter(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) u32 starttime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) u32 doorbell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) u64 ppaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) u32 dw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * if the chip reset detected flag is set, we can bypass a bunch of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * stuff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (test_bit(AF_CHPRST_DETECTED, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) goto skip_chip_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * may have left them enabled or we may be recovering from a fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * wait for the firmware to become ready by forcing an interrupt and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * waiting for a response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) starttime = jiffies_to_msecs(jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) esas2r_force_interrupt(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (doorbell == 0xFFFFFFFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * Give the firmware up to two seconds to enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * register access after a reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return esas2r_set_degraded_mode(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) "unable to access registers");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) } else if (doorbell & DRBL_FORCE_INT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) u32 ver = (doorbell & DRBL_FW_VER_MSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * This driver supports version 0 and version 1 of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * the API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) esas2r_write_register_dword(a, MU_DOORBELL_OUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (ver == DRBL_FW_VER_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) set_bit(AF_LEGACY_SGE_MODE, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) a->max_vdareq_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) a->build_sgl = esas2r_build_sg_list_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) } else if (ver == DRBL_FW_VER_1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) a->max_vdareq_size = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) a->build_sgl = esas2r_build_sg_list_prd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return esas2r_set_degraded_mode(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) "unknown firmware version");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) schedule_timeout_interruptible(msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) esas2r_hdebug("FW ready TMO");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) esas2r_bugon();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return esas2r_set_degraded_mode(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) "firmware start has timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /* purge any asynchronous events since we will repost them later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) starttime = jiffies_to_msecs(jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (doorbell & DRBL_MSG_IFC_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) esas2r_write_register_dword(a, MU_DOORBELL_OUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) schedule_timeout_interruptible(msecs_to_jiffies(50));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) esas2r_hdebug("timeout waiting for interface down");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) skip_chip_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * first things first, before we go changing any of these registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * disable the communication lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) dw &= ~MU_ILC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) dw &= ~MU_OLC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* configure the communication list addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) ppaddr = a->inbound_list_md.phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) lower_32_bits(ppaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) upper_32_bits(ppaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) ppaddr = a->outbound_list_md.phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) lower_32_bits(ppaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) upper_32_bits(ppaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ppaddr = a->uncached_phys +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ((u8 *)a->outbound_copy - a->uncached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) lower_32_bits(ppaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) upper_32_bits(ppaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* reset the read and write pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) *a->outbound_copy =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) a->last_write =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) a->last_read = a->list_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) a->last_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) a->last_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) a->last_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) MU_OLW_TOGGLE | a->last_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* configure the interface select fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) (dw | MU_OLIC_LIST_F0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) MU_OLIC_SOURCE_DDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /* finish configuring the communication lists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) | (a->list_size << MU_ILC_NUMBER_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * notify the firmware that we're done setting up the communication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * list registers. wait here until the firmware is done configuring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * its lists. it will signal that it is done by enabling the lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) starttime = jiffies_to_msecs(jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (doorbell & DRBL_MSG_IFC_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) esas2r_write_register_dword(a, MU_DOORBELL_OUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) schedule_timeout_interruptible(msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) esas2r_hdebug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) "timeout waiting for communication list init");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) esas2r_bugon();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return esas2r_set_degraded_mode(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) "timeout waiting for communication list init");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * flag whether the firmware supports the power down doorbell. we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * determine this by reading the inbound doorbell enable mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (doorbell & DRBL_POWER_DOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * enable assertion of outbound queue and doorbell interrupts in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * main interrupt cause register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* Process the initialization message just completed and format the next one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static bool esas2r_format_init_msg(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct esas2r_request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) u32 msg = a->init_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct atto_vda_cfg_init *ci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) a->init_msg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) switch (msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) case ESAS2R_INIT_MSG_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) case ESAS2R_INIT_MSG_REINIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) esas2r_hdebug("CFG init");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) esas2r_build_cfg_req(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) VDA_CFG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) ci->sgl_page_size = cpu_to_le32(sgl_page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* firmware interface overflows in y2106 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) rq->flags |= RF_FAILURE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) a->init_msg = ESAS2R_INIT_MSG_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) case ESAS2R_INIT_MSG_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (rq->req_stat == RS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) u32 major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) u32 minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) u16 fw_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) a->fw_version = le16_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) rq->func_rsp.cfg_rsp.vda_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) fw_release = le16_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) rq->func_rsp.cfg_rsp.fw_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) major = LOBYTE(fw_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) minor = HIBYTE(fw_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) a->fw_version += (major << 16) + (minor << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) esas2r_hdebug("FAILED");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * the 2.71 and earlier releases of R6xx firmware did not error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * unsupported config requests correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) || (be32_to_cpu(a->fw_version) > 0x00524702)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) esas2r_hdebug("CFG get init");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) esas2r_build_cfg_req(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) VDA_CFG_GET_INIT2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) sizeof(struct atto_vda_cfg_init),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) rq->vrq->cfg.sg_list_offset = offsetof(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct atto_vda_cfg_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) data.sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) rq->vrq->cfg.data.prde.ctl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) cpu_to_le32(sizeof(struct atto_vda_cfg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) rq->vrq->cfg.data.prde.address = cpu_to_le64(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) rq->vrq_md->phys_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) sizeof(union atto_vda_req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) rq->flags |= RF_FAILURE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) case ESAS2R_INIT_MSG_GET_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (msg == ESAS2R_INIT_MSG_GET_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) ci = (struct atto_vda_cfg_init *)rq->data_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (rq->req_stat == RS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) a->num_targets_backend =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) le32_to_cpu(ci->num_targets_backend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) a->ioctl_tunnel =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) le32_to_cpu(ci->ioctl_tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) esas2r_hdebug("FAILED");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) rq->req_stat = RS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * Perform initialization messages via the request queue. Messages are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * performed with interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) bool esas2r_init_msgs(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) bool success = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct esas2r_request *rq = &a->general_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) esas2r_rq_init_request(rq, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) rq->comp_cb = esas2r_dummy_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (a->init_msg == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) a->init_msg = ESAS2R_INIT_MSG_REINIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) while (a->init_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (esas2r_format_init_msg(a, rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) spin_lock_irqsave(&a->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) esas2r_start_vda_request(a, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) spin_unlock_irqrestore(&a->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) esas2r_wait_request(a, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (rq->req_stat != RS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (rq->req_stat == RS_SUCCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) || ((rq->flags & RF_FAILURE_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) && rq->req_stat != RS_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) a->init_msg, rq->req_stat, rq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) a->init_msg = ESAS2R_INIT_MSG_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) success = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) esas2r_rq_destroy_request(rq, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* Initialize the adapter chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) bool rslt = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct esas2r_request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (test_bit(AF_DEGRADED_MODE, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (!test_bit(AF_NVR_VALID, &a->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (!esas2r_nvram_read_direct(a))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) esas2r_log(ESAS2R_LOG_WARN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) "invalid/missing NVRAM parameters");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (!esas2r_init_msgs(a)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) esas2r_set_degraded_mode(a, "init messages failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /* The firmware is ready. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) clear_bit(AF_DEGRADED_MODE, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) clear_bit(AF_CHPRST_PENDING, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /* Post all the async event requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) esas2r_start_ae_request(a, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (!a->flash_rev[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) esas2r_read_flash_rev(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!a->image_type[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) esas2r_read_image_type(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (a->fw_version == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) a->fw_rev[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) sprintf(a->fw_rev, "%1d.%02d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) (int)LOBYTE(HIWORD(a->fw_version)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) (int)HIBYTE(HIWORD(a->fw_version)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) esas2r_hdebug("firmware revision: %s", a->fw_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (test_bit(AF_CHPRST_DETECTED, &a->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) && (test_bit(AF_FIRST_INIT, &a->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) esas2r_enable_chip_interrupts(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /* initialize discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) esas2r_disc_initialize(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * wait for the device wait time to expire here if requested. this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * usually requested during initial driver load and possibly when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * resuming from a low power state. deferred device waiting will use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * interrupts. chip reset recovery always defers device waiting to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * avoid being in a TASKLET too long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (init_poll) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) u32 currtime = a->disc_start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) u32 nexttick = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) u32 deltatime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * Block Tasklets from getting scheduled and indicate this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * polled discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) set_bit(AF_TASKLET_SCHEDULED, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) set_bit(AF_DISC_POLLED, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * Temporarily bring the disable count to zero to enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * deferred processing. Note that the count is already zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * after the first initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (test_bit(AF_FIRST_INIT, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) atomic_dec(&a->disable_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) while (test_bit(AF_DISC_PENDING, &a->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) schedule_timeout_interruptible(msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * Determine the need for a timer tick based on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * delta time between this and the last iteration of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * this loop. We don't use the absolute time because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * then we would have to worry about when nexttick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * wraps and currtime hasn't yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) deltatime = jiffies_to_msecs(jiffies) - currtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) currtime += deltatime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * Process any waiting discovery as long as the chip is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * up. If a chip reset happens during initial polling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * we have to make sure the timer tick processes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * doorbell indicating the firmware is ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (!test_bit(AF_CHPRST_PENDING, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) esas2r_disc_check_for_work(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /* Simulate a timer tick. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (nexttick <= deltatime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /* Time for a timer tick */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) nexttick += 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) esas2r_timer_tick(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (nexttick > deltatime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) nexttick -= deltatime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /* Do any deferred processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (esas2r_is_tasklet_pending(a))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) esas2r_do_tasklet_tasks(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (test_bit(AF_FIRST_INIT, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) atomic_inc(&a->disable_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) clear_bit(AF_DISC_POLLED, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) esas2r_targ_db_report_changes(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * For cases where (a) the initialization messages processing may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * handle an interrupt for a port event and a discovery is waiting, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * we are not waiting for devices, or (b) the device wait time has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * exhausted but there is still discovery pending, start any leftover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * discovery in interrupt driven mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) esas2r_disc_start_waiting(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /* Enable chip interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) a->int_mask = ESAS2R_INT_STS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) esas2r_enable_chip_interrupts(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) esas2r_enable_heartbeat(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) rslt = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * Regardless of whether initialization was successful, certain things
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * need to get done before we exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) test_bit(AF_FIRST_INIT, &a->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * Reinitialization was performed during the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * initialization. Only clear the chip reset flag so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * original device polling is not cancelled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (!rslt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) clear_bit(AF_CHPRST_PENDING, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /* First initialization or a subsequent re-init is complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (!rslt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) clear_bit(AF_CHPRST_PENDING, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) clear_bit(AF_DISC_PENDING, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) /* Enable deferred processing after the first initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (test_bit(AF_FIRST_INIT, &a->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) clear_bit(AF_FIRST_INIT, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (atomic_dec_return(&a->disable_cnt) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) esas2r_do_deferred_processes(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return rslt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) void esas2r_reset_adapter(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) set_bit(AF_OS_RESET, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) esas2r_local_reset_adapter(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) esas2r_schedule_tasklet(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) void esas2r_reset_chip(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (!esas2r_is_adapter_present(a))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * Before we reset the chip, save off the VDA core dump. The VDA core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) * dump is located in the upper 512KB of the onchip SRAM. Make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * to not overwrite a previous crash that was saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) esas2r_read_mem_block(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) a->fw_coredump_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) MW_DATA_ADDR_SRAM + 0x80000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) ESAS2R_FWCOREDUMP_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) set_bit(AF2_COREDUMP_SAVED, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /* Reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (a->pcid->revision == MVR_FREY_B2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) MU_CTL_IN_FULL_RST2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) MU_CTL_IN_FULL_RST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /* Stall a little while to let the reset condition clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) u32 starttime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) u32 doorbell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) starttime = jiffies_to_msecs(jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (doorbell & DRBL_POWER_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) esas2r_write_register_dword(a, MU_DOORBELL_OUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) schedule_timeout_interruptible(msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) esas2r_hdebug("Timeout waiting for power down");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * Perform power management processing including managing device states, adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * states, interrupts, and I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) void esas2r_power_down(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) set_bit(AF_POWER_MGT, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) set_bit(AF_POWER_DOWN, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) u32 starttime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) u32 doorbell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) * We are currently running OK and will be reinitializing later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * increment the disable count to coordinate with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * esas2r_init_adapter. We don't have to do this in degraded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * mode since we never enabled interrupts in the first place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) esas2r_disable_chip_interrupts(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) esas2r_disable_heartbeat(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /* wait for any VDA activity to clear before continuing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) esas2r_write_register_dword(a, MU_DOORBELL_IN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) DRBL_MSG_IFC_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) starttime = jiffies_to_msecs(jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) doorbell =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) esas2r_read_register_dword(a, MU_DOORBELL_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (doorbell & DRBL_MSG_IFC_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) esas2r_write_register_dword(a, MU_DOORBELL_OUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) schedule_timeout_interruptible(msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) esas2r_hdebug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) "timeout waiting for interface down");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * For versions of firmware that support it tell them the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * is powering down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) esas2r_power_down_notify_firmware(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) /* Suspend I/O processing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) set_bit(AF_OS_RESET, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) set_bit(AF_DISC_PENDING, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) set_bit(AF_CHPRST_PENDING, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) esas2r_process_adapter_reset(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) /* Remove devices now that I/O is cleaned up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) esas2r_targ_db_remove_all(a, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * Perform power management processing including managing device states, adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * states, interrupts, and I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) clear_bit(AF_POWER_DOWN, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) esas2r_init_pci_cfg_space(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) set_bit(AF_FIRST_INIT, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) atomic_inc(&a->disable_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) /* reinitialize the adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) ret = esas2r_check_adapter(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (!esas2r_init_adapter_hw(a, init_poll))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) /* send the reset asynchronous event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) esas2r_send_reset_ae(a, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) /* clear this flag after initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) clear_bit(AF_POWER_MGT, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) bool esas2r_is_adapter_present(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (test_bit(AF_NOT_PRESENT, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) set_bit(AF_NOT_PRESENT, &a->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) const char *esas2r_get_model_name(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) switch (a->pcid->subsystem_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) case ATTO_ESAS_R680:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return "ATTO ExpressSAS R680";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) case ATTO_ESAS_R608:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return "ATTO ExpressSAS R608";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) case ATTO_ESAS_R60F:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return "ATTO ExpressSAS R60F";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) case ATTO_ESAS_R6F0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return "ATTO ExpressSAS R6F0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) case ATTO_ESAS_R644:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return "ATTO ExpressSAS R644";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) case ATTO_ESAS_R648:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) return "ATTO ExpressSAS R648";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) case ATTO_TSSC_3808:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) return "ATTO ThunderStream SC 3808D";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) case ATTO_TSSC_3808E:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return "ATTO ThunderStream SC 3808E";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) case ATTO_TLSH_1068:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return "ATTO ThunderLink SH 1068";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) return "ATTO SAS Controller";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) switch (a->pcid->subsystem_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) case ATTO_ESAS_R680:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) return "R680";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) case ATTO_ESAS_R608:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return "R608";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) case ATTO_ESAS_R60F:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return "R60F";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) case ATTO_ESAS_R6F0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) return "R6F0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) case ATTO_ESAS_R644:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) return "R644";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) case ATTO_ESAS_R648:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) return "R648";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) case ATTO_TSSC_3808:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return "SC 3808D";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) case ATTO_TSSC_3808E:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return "SC 3808E";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) case ATTO_TLSH_1068:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) return "SH 1068";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }