/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2017 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 */

#ifndef BEISCSI_H
#define BEISCSI_H

#include <linux/pci.h>
#include <linux/if_vlan.h>
#include <linux/irq_poll.h>
#define FW_VER_LEN	32
#define MCC_Q_LEN	128
#define MCC_CQ_LEN	256
#define MAX_MCC_CMD	16
/* BladeEngine Generation numbers */
#define BE_GEN2	2
#define BE_GEN3	3
#define BE_GEN4	4
struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};
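
/*
 * Illustrative sketch (an assumed usage pattern, not a driver entry
 * point): a be_dma_mem region is typically backed by the DMA-coherent
 * allocator, with @va and @dma holding the CPU and bus addresses of
 * the same buffer. Assumes <linux/dma-mapping.h> is reachable through
 * the includes above.
 */
static inline int beiscsi_example_alloc_dma_mem(struct pci_dev *pdev,
						struct be_dma_mem *mem,
						u32 size)
{
	mem->size = size;
	mem->va = dma_alloc_coherent(&pdev->dev, size, &mem->dma,
				     GFP_KERNEL);
	return mem->va ? 0 : -ENOMEM;
}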

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	u16 used;	/* Number of valid elements in the queue */
};

static inline u32 MODULO(u16 val, u16 limit)
{
	WARN_ON(limit & (limit - 1));
	return val & (limit - 1);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}
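
/*
 * Worked example (illustrative, not used by the driver): because the
 * queue length is a power of two, index_inc() wraps via MODULO()
 * without a divide. With MCC_Q_LEN (128) entries, index 127 advances
 * back to 0.
 */
static inline void beiscsi_example_index_wrap(void)
{
	u16 head = MCC_Q_LEN - 1;	/* 127 */

	index_inc(&head, MCC_Q_LEN);	/* head is now 0 */
}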

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
{
	return q->dma_mem.va + wrb_num * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
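
/*
 * Illustrative producer sketch (assumed usage, not a driver entry
 * point): a new work request takes the slot at @head, the caller
 * fills it in, and the head index is advanced so the entry becomes
 * visible to the hardware.
 */
static inline void *beiscsi_example_queue_post(struct be_queue_info *q)
{
	void *entry = queue_head_node(q);

	queue_head_inc(q);
	q->used++;
	return entry;
}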

/* iSCSI */

struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */
	unsigned long jiffies;
	u32 eq_prev;		/* Used to calculate the EQE delta */
	u32 prev_eqd;
#define BEISCSI_EQ_DELAY_MIN	0
#define BEISCSI_EQ_DELAY_DEF	32
#define BEISCSI_EQ_DELAY_MAX	128
};
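
/*
 * Illustrative sketch (the real AIC algorithm lives in the driver's
 * .c files): a newly computed delay would be clamped to the bounds
 * defined above before being programmed into the EQ.
 */
static inline u32 beiscsi_example_clamp_eqd(u32 eqd)
{
	return clamp_t(u32, eqd, BEISCSI_EQ_DELAY_MIN, BEISCSI_EQ_DELAY_MAX);
}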

struct be_eq_obj {
	u32 cq_count;
	struct be_queue_info q;
	struct beiscsi_hba *phba;
	struct be_queue_info *cq;
	struct work_struct mcc_work; /* Work Item */
	struct irq_poll	iopoll;
};

struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
};

struct beiscsi_mcc_tag_state {
	unsigned long tag_state;
#define MCC_TAG_STATE_RUNNING	0
#define MCC_TAG_STATE_TIMEOUT	1
#define MCC_TAG_STATE_ASYNC	2
#define MCC_TAG_STATE_IGNORE	3
	void (*cbfn)(struct beiscsi_hba *, unsigned int);
	struct be_dma_mem tag_mem_state;
};
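
/*
 * Illustrative sketch (assumed usage): tag_state is manipulated with
 * the generic atomic bit helpers, e.g. marking a command as in flight
 * before it is posted to the MCC queue.
 */
static inline void beiscsi_example_mark_tag_running(struct beiscsi_mcc_tag_state *ts)
{
	set_bit(MCC_TAG_STATE_RUNNING, &ts->tag_state);
}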

struct be_ctrl_info {
	u8 __iomem *csr;
	u8 __iomem *db;		/* Door Bell */
	u8 __iomem *pcicfg;	/* PCI config space */
	struct pci_dev *pdev;

	/* Mbox used for cmd request/response */
	struct mutex mbox_lock;	/* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/*
	 * Mbox mem is adjusted to align to 16 bytes. The allocated
	 * address is stored for freeing purposes.
	 */
	struct be_dma_mem mbox_mem_alloced;

	/* MCC Rings */
	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */

	wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
	unsigned int mcc_tag[MAX_MCC_CMD];
	unsigned int mcc_tag_status[MAX_MCC_CMD + 1];
	unsigned short mcc_alloc_index;
	unsigned short mcc_free_index;
	unsigned int mcc_tag_available;

	struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];
};

#include "be_cmds.h"

/* WRB index mask for MCC_Q_LEN queue entries */
#define MCC_Q_WRB_IDX_MASK	CQE_STATUS_WRB_MASK
#define MCC_Q_WRB_IDX_SHIFT	CQE_STATUS_WRB_SHIFT
/* Tags run from 1 to MAX_MCC_CMD, so the mask must cover MAX_MCC_CMD itself */
#define MCC_Q_CMD_TAG_MASK	((MAX_MCC_CMD << 1) - 1)
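
/*
 * Worked example: with MAX_MCC_CMD = 16, the mask is
 * (16 << 1) - 1 = 31 (0x1f), wide enough to carry tags 1..16.
 */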

#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)				\
	((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +		\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
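
/*
 * Worked example: a 2-byte buffer starting at the last byte of a 4K
 * page (offset 0xfff) spans two pages:
 * (0xfff + 2 + 0xfff) >> 12 = 0x2000 >> 12 = 2.
 */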

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)					\
	(((size_t)&(((_struct *)0)->field)) % 32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void amap_set(void *ptr, u32 dw_offset, u32 mask,
			    u32 offset, u32 value)
{
	u32 *dw = (u32 *)ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}
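
/*
 * Worked example: writing value 5 into a 3-bit field at bit offset 4
 * uses mask = amap_mask(3) = 0x7; amap_set() clears bits [6:4] of the
 * target dword and ORs in (5 & 0x7) << 4 = 0x50.
 */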

#define AMAP_SET_BITS(_struct, field, ptr, val)				\
	amap_set(ptr,							\
		 offsetof(_struct, field) / 32,				\
		 amap_mask(sizeof(((_struct *)0)->field)),		\
		 AMAP_BIT_OFFSET(_struct, field),			\
		 val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)				\
	amap_get(ptr,							\
		 offsetof(_struct, field) / 32,				\
		 amap_mask(sizeof(((_struct *)0)->field)),		\
		 AMAP_BIT_OFFSET(_struct, field))
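
/*
 * Usage sketch with a hypothetical descriptor layout: the amap_*
 * structs in be_cmds.h declare one u8 per *bit*, so sizeof() yields a
 * field's width in bits and offsetof() its bit offset, which is what
 * the macros above rely on.
 */
struct amap_example {
	u8 valid;	/* dword 0, bit 0 */
	u8 rsvd[15];	/* dword 0, bits 1..15 */
	u8 id[16];	/* dword 0, bits 16..31 */
};

static inline u32 beiscsi_example_get_id(void *wrb)
{
	AMAP_SET_BITS(struct amap_example, valid, wrb, 1);
	return AMAP_GET_BITS(struct amap_example, id, wrb);
}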

#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	WARN_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif /* __BIG_ENDIAN */
}
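
/*
 * Worked example: on a big-endian host a dword holding 0x11223344 is
 * rewritten so its bytes read 44 33 22 11 in memory, matching the
 * little-endian layout the hardware expects; on little-endian builds
 * swap_dws() compiles to nothing.
 */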
#endif /* BEISCSI_H */