/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#ifndef _HPTIOP_H_
#define _HPTIOP_H_

struct hpt_iopmu_itl {
	__le32 reserved0[4];
	__le32 inbound_msgaddr0;
	__le32 inbound_msgaddr1;
	__le32 outbound_msgaddr0;
	__le32 outbound_msgaddr1;
	__le32 inbound_doorbell;
	__le32 inbound_intstatus;
	__le32 inbound_intmask;
	__le32 outbound_doorbell;
	__le32 outbound_intstatus;
	__le32 outbound_intmask;
	__le32 reserved1[2];
	__le32 inbound_queue;
	__le32 outbound_queue;
};

#define IOPMU_QUEUE_EMPTY		0xffffffff
#define IOPMU_QUEUE_MASK_HOST_BITS	0xf0000000
#define IOPMU_QUEUE_ADDR_HOST_BIT	0x80000000
#define IOPMU_QUEUE_REQUEST_SIZE_BIT	0x40000000
#define IOPMU_QUEUE_REQUEST_RESULT_BIT	0x40000000
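
/*
 * Illustrative sketch, not part of the interface: on ITL hardware a request
 * is posted by writing a single 32-bit word to inbound_queue.  The word is
 * expected to carry the request's shifted bus address (see req_shifted_phy
 * below) OR'ed with the host/size bits above; the exact encoding is handled
 * in hptiop.c.  Variable names here are made up for illustration:
 *
 *	u32 post = IOPMU_QUEUE_ADDR_HOST_BIT | req_shifted_phy;
 *	writel(post, &hba->u.itl.iop->inbound_queue);
 */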

#define IOPMU_OUTBOUND_INT_MSG0		1
#define IOPMU_OUTBOUND_INT_MSG1		2
#define IOPMU_OUTBOUND_INT_DOORBELL	4
#define IOPMU_OUTBOUND_INT_POSTQUEUE	8
#define IOPMU_OUTBOUND_INT_PCI		0x10

#define IOPMU_INBOUND_INT_MSG0		1
#define IOPMU_INBOUND_INT_MSG1		2
#define IOPMU_INBOUND_INT_DOORBELL	4
#define IOPMU_INBOUND_INT_ERROR		8
#define IOPMU_INBOUND_INT_POSTQUEUE	0x10

#define MVIOP_QUEUE_LEN			512

struct hpt_iopmu_mv {
	__le32 inbound_head;
	__le32 inbound_tail;
	__le32 outbound_head;
	__le32 outbound_tail;
	__le32 inbound_msg;
	__le32 outbound_msg;
	__le32 reserve[10];
	__le64 inbound_q[MVIOP_QUEUE_LEN];
	__le64 outbound_q[MVIOP_QUEUE_LEN];
};

struct hpt_iopmv_regs {
	__le32 reserved[0x20400 / 4];
	__le32 inbound_doorbell;
	__le32 inbound_intmask;
	__le32 outbound_doorbell;
	__le32 outbound_intmask;
};

#pragma pack(1)
struct hpt_iopmu_mvfrey {
	__le32 reserved0[(0x4000 - 0) / 4];
	__le32 inbound_base;
	__le32 inbound_base_high;
	__le32 reserved1[(0x4018 - 0x4008) / 4];
	__le32 inbound_write_ptr;
	__le32 reserved2[(0x402c - 0x401c) / 4];
	__le32 inbound_conf_ctl;
	__le32 reserved3[(0x4050 - 0x4030) / 4];
	__le32 outbound_base;
	__le32 outbound_base_high;
	__le32 outbound_shadow_base;
	__le32 outbound_shadow_base_high;
	__le32 reserved4[(0x4088 - 0x4060) / 4];
	__le32 isr_cause;
	__le32 isr_enable;
	__le32 reserved5[(0x1020c - 0x4090) / 4];
	__le32 pcie_f0_int_enable;
	__le32 reserved6[(0x10400 - 0x10210) / 4];
	__le32 f0_to_cpu_msg_a;
	__le32 reserved7[(0x10420 - 0x10404) / 4];
	__le32 cpu_to_f0_msg_a;
	__le32 reserved8[(0x10480 - 0x10424) / 4];
	__le32 f0_doorbell;
	__le32 f0_doorbell_enable;
};

struct mvfrey_inlist_entry {
	dma_addr_t addr;
	__le32 intrfc_len;
	__le32 reserved;
};

struct mvfrey_outlist_entry {
	__le32 val;
};
#pragma pack()

#define MVIOP_MU_QUEUE_ADDR_HOST_MASK	(~(0x1full))
#define MVIOP_MU_QUEUE_ADDR_HOST_BIT	4

#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32		0xffffffff
#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT	1
#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT	2
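
/*
 * Illustrative sketch, not part of the interface: MV inbound queue entries
 * are 64-bit values whose low bits are flags and whose remaining bits
 * (MVIOP_MU_QUEUE_ADDR_HOST_MASK) carry the request address.  A host entry
 * might be assembled roughly as below; req_phys is a made-up variable and
 * the authoritative code is in hptiop.c:
 *
 *	u64 entry = (req_phys & MVIOP_MU_QUEUE_ADDR_HOST_MASK) |
 *		    MVIOP_MU_QUEUE_ADDR_HOST_BIT |
 *		    MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT;
 */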

#define MVIOP_MU_INBOUND_INT_MSG	1
#define MVIOP_MU_INBOUND_INT_POSTQUEUE	2
#define MVIOP_MU_OUTBOUND_INT_MSG	1
#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE	2

#define CL_POINTER_TOGGLE		0x00004000
#define CPU_TO_F0_DRBL_MSG_BIT		0x02000000

enum hpt_iopmu_message {
	/* host-to-iop messages */
	IOPMU_INBOUND_MSG0_NOP = 0,
	IOPMU_INBOUND_MSG0_RESET,
	IOPMU_INBOUND_MSG0_FLUSH,
	IOPMU_INBOUND_MSG0_SHUTDOWN,
	IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
	IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
	IOPMU_INBOUND_MSG0_RESET_COMM,
	IOPMU_INBOUND_MSG0_MAX = 0xff,
	/* iop-to-host messages */
	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_MAX = 0x1ff,
	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_0 = 0x200,
	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_MAX = 0x2ff,
	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_0 = 0x300,
	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
};

struct hpt_iop_request_header {
	__le32 size;
	__le32 type;
	__le32 flags;
	__le32 result;
	__le32 context; /* host context */
	__le32 context_hi32;
};
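
/*
 * Every request exchanged with the IOP begins with this header.  A minimal
 * sketch of filling it for a SCSI command; the constants are the ones
 * defined below, everything else (req, tag) is made up for illustration and
 * the real request setup, including the size accounting for the SG table,
 * lives in hptiop.c:
 *
 *	req->header.size = cpu_to_le32(sizeof(*req));
 *	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
 *	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
 *	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
 *	req->header.context = cpu_to_le32(tag);
 *	req->header.context_hi32 = 0;
 */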

#define IOP_REQUEST_FLAG_SYNC_REQUEST	1
#define IOP_REQUEST_FLAG_BIST_REQUEST	2
#define IOP_REQUEST_FLAG_REMAPPED	4
#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT	8
#define IOP_REQUEST_FLAG_ADDR_BITS	0x40 /* flags[31:16] is phy_addr[47:32] */

enum hpt_iop_request_type {
	IOP_REQUEST_TYPE_GET_CONFIG = 0,
	IOP_REQUEST_TYPE_SET_CONFIG,
	IOP_REQUEST_TYPE_BLOCK_COMMAND,
	IOP_REQUEST_TYPE_SCSI_COMMAND,
	IOP_REQUEST_TYPE_IOCTL_COMMAND,
	IOP_REQUEST_TYPE_MAX
};

enum hpt_iop_result_type {
	IOP_RESULT_PENDING = 0,
	IOP_RESULT_SUCCESS,
	IOP_RESULT_FAIL,
	IOP_RESULT_BUSY,
	IOP_RESULT_RESET,
	IOP_RESULT_INVALID_REQUEST,
	IOP_RESULT_BAD_TARGET,
	IOP_RESULT_CHECK_CONDITION,
};

struct hpt_iop_request_get_config {
	struct hpt_iop_request_header header;
	__le32 interface_version;
	__le32 firmware_version;
	__le32 max_requests;
	__le32 request_size;
	__le32 max_sg_count;
	__le32 data_transfer_length;
	__le32 alignment_mask;
	__le32 max_devices;
	__le32 sdram_size;
};

struct hpt_iop_request_set_config {
	struct hpt_iop_request_header header;
	__le32 iop_id;
	__le16 vbus_id;
	__le16 max_host_request_size;
	__le32 reserve[6];
};

struct hpt_iopsg {
	__le32 size;
	__le32 eot; /* non-zero: end of table */
	__le64 pci_address;
};
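
/*
 * A scatter/gather table is an array of hpt_iopsg entries embedded in a
 * block or SCSI request; the IOP stops at the first entry with a non-zero
 * eot field.  A hedged sketch of filling one from a DMA-mapped scatterlist
 * (sg, nseg and sg_list are illustrative names only):
 *
 *	for (i = 0; i < nseg; i++) {
 *		sg_list[i].pci_address = cpu_to_le64(sg_dma_address(&sg[i]));
 *		sg_list[i].size = cpu_to_le32(sg_dma_len(&sg[i]));
 *		sg_list[i].eot = (i == nseg - 1) ? cpu_to_le32(1) : 0;
 *	}
 */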

struct hpt_iop_request_block_command {
	struct hpt_iop_request_header header;
	u8 channel;
	u8 target;
	u8 lun;
	u8 pad1;
	__le16 command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */
	__le16 sectors;
	__le64 lba;
	struct hpt_iopsg sg_list[1];
};

#define IOP_BLOCK_COMMAND_READ		1
#define IOP_BLOCK_COMMAND_WRITE		2
#define IOP_BLOCK_COMMAND_VERIFY	3
#define IOP_BLOCK_COMMAND_FLUSH		4
#define IOP_BLOCK_COMMAND_SHUTDOWN	5

struct hpt_iop_request_scsi_command {
	struct hpt_iop_request_header header;
	u8 channel;
	u8 target;
	u8 lun;
	u8 pad1;
	u8 cdb[16];
	__le32 dataxfer_length;
	struct hpt_iopsg sg_list[1];
};

struct hpt_iop_request_ioctl_command {
	struct hpt_iop_request_header header;
	__le32 ioctl_code;
	__le32 inbuf_size;
	__le32 outbuf_size;
	__le32 bytes_returned;
	u8 buf[1];
	/* out data should be put at buf[(inbuf_size+3)&~3] */
};
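
/*
 * The ioctl payload travels inline in buf[]: input data first, then the
 * output area starting at the next 4-byte boundary, as the comment above
 * states.  Computing that offset (req is an illustrative pointer):
 *
 *	u8 *out = req->buf + ((le32_to_cpu(req->inbuf_size) + 3) & ~3);
 */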

#define HPTIOP_MAX_REQUESTS	256u

struct hptiop_request {
	struct hptiop_request *next;
	void *req_virt;
	u32 req_shifted_phy;
	struct scsi_cmnd *scp;
	int index;
};
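
/*
 * hptiop_request entries are chained through next into a singly linked free
 * list headed by hptiop_hba.req_list (below).  A hedged sketch of taking and
 * returning an entry; the helper names are illustrative, the real management
 * is in hptiop.c:
 *
 *	static struct hptiop_request *get_req(struct hptiop_hba *hba)
 *	{
 *		struct hptiop_request *ret = hba->req_list;
 *
 *		if (ret)
 *			hba->req_list = ret->next;
 *		return ret;
 *	}
 *
 *	static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
 *	{
 *		req->next = hba->req_list;
 *		hba->req_list = req;
 *	}
 */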

struct hpt_scsi_pointer {
	int mapped;
	int sgcnt;
	dma_addr_t dma_handle;
};

#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)

enum hptiop_family {
	UNKNOWN_BASED_IOP,
	INTEL_BASED_IOP,
	MV_BASED_IOP,
	MVFREY_BASED_IOP
};

struct hptiop_hba {
	struct hptiop_adapter_ops *ops;
	union {
		struct {
			struct hpt_iopmu_itl __iomem *iop;
			void __iomem *plx;
		} itl;
		struct {
			struct hpt_iopmv_regs *regs;
			struct hpt_iopmu_mv __iomem *mu;
			void *internal_req;
			dma_addr_t internal_req_phy;
		} mv;
		struct {
			struct hpt_iop_request_get_config __iomem *config;
			struct hpt_iopmu_mvfrey __iomem *mu;

			int internal_mem_size;
			struct hptiop_request internal_req;
			int list_count;
			struct mvfrey_inlist_entry *inlist;
			dma_addr_t inlist_phy;
			__le32 inlist_wptr;
			struct mvfrey_outlist_entry *outlist;
			dma_addr_t outlist_phy;
			__le32 *outlist_cptr; /* copy pointer shadow */
			dma_addr_t outlist_cptr_phy;
			__le32 outlist_rptr;
		} mvfrey;
	} u;

	struct Scsi_Host *host;
	struct pci_dev *pcidev;

	/* IOP config info */
	u32 interface_version;
	u32 firmware_version;
	u32 sdram_size;
	u32 max_devices;
	u32 max_requests;
	u32 max_request_size;
	u32 max_sg_descriptors;

	u32 req_size; /* host-allocated request buffer size */

	u32 iopintf_v2: 1;
	u32 initialized: 1;
	u32 msg_done: 1;

	struct hptiop_request *req_list;
	struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];

	/* used to free allocated dma area */
	void *dma_coherent[HPTIOP_MAX_REQUESTS];
	dma_addr_t dma_coherent_handle[HPTIOP_MAX_REQUESTS];

	atomic_t reset_count;
	atomic_t resetting;

	wait_queue_head_t reset_wq;
	wait_queue_head_t ioctl_wq;
};

struct hpt_ioctl_k {
	struct hptiop_hba *hba;
	u32 ioctl_code;
	u32 inbuf_size;
	u32 outbuf_size;
	void *inbuf;
	void *outbuf;
	u32 *bytes_returned;
	void (*done)(struct hpt_ioctl_k *);
	int result; /* HPT_IOCTL_RESULT_ */
};

struct hptiop_adapter_ops {
	enum hptiop_family family;
	int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
	int (*internal_memalloc)(struct hptiop_hba *hba);
	int (*internal_memfree)(struct hptiop_hba *hba);
	int (*map_pci_bar)(struct hptiop_hba *hba);
	void (*unmap_pci_bar)(struct hptiop_hba *hba);
	void (*enable_intr)(struct hptiop_hba *hba);
	void (*disable_intr)(struct hptiop_hba *hba);
	int (*get_config)(struct hptiop_hba *hba,
			struct hpt_iop_request_get_config *config);
	int (*set_config)(struct hptiop_hba *hba,
			struct hpt_iop_request_set_config *config);
	int (*iop_intr)(struct hptiop_hba *hba);
	void (*post_msg)(struct hptiop_hba *hba, u32 msg);
	void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
	int hw_dma_bit_mask;
	int (*reset_comm)(struct hptiop_hba *hba);
	__le64 host_phy_flag;
};
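
/*
 * Each controller family supplies one hptiop_adapter_ops instance and the
 * driver dispatches through it.  A hedged, abbreviated sketch of what such a
 * table might look like; the function names are illustrative only and not
 * necessarily the ones used in hptiop.c:
 *
 *	static struct hptiop_adapter_ops itl_ops = {
 *		.family          = INTEL_BASED_IOP,
 *		.iop_wait_ready  = iop_wait_ready_itl,
 *		.map_pci_bar     = hptiop_map_pci_bar_itl,
 *		.enable_intr     = hptiop_enable_intr_itl,
 *		.post_req        = hptiop_post_req_itl,
 *		.hw_dma_bit_mask = 64,
 *		...
 *	};
 */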

#define HPT_IOCTL_RESULT_OK	0
#define HPT_IOCTL_RESULT_FAILED	(-1)

#if 0
#define dprintk(fmt, args...) do { printk(fmt, ##args); } while (0)
#else
#define dprintk(fmt, args...)
#endif

#endif