Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags   |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) #ifndef _QIB_KERNEL_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) #define _QIB_KERNEL_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * This software is available to you under a choice of one of two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * licenses.  You may choose to be licensed under the terms of the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * General Public License (GPL) Version 2, available from the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * COPYING in the main directory of this source tree, or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  * OpenIB.org BSD license below:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *     Redistribution and use in source and binary forms, with or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *     without modification, are permitted provided that the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *     conditions are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *      - Redistributions of source code must retain the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *        copyright notice, this list of conditions and the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  *        disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  *      - Redistributions in binary form must reproduce the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  *        copyright notice, this list of conditions and the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  *        disclaimer in the documentation and/or other materials
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  *        provided with the distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  * SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  * This header file is the base header file for qlogic_ib kernel code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  * qib_user.h serves a similar purpose for user code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/kref.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include <linux/xarray.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #include <rdma/ib_hdrs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include <rdma/rdma_vt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #include "qib_common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #include "qib_verbs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
/* only s/w (driver) major version of QLogic_IB we can handle */
#define QIB_CHIP_VERS_MAJ 2U

/* minor version; don't care about this except for printing */
#define QIB_CHIP_VERS_MIN 0U

/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define QIB_OUI 0x001175
#define QIB_OUI_LSB 40

/*
 * Per-driver stats: either not specific to any one device or port,
 * or summed over all of the devices and ports.
 * They are described by name via the ipathfs filesystem, so layout
 * and number of elements can change without breaking compatibility.
 * If members are added or deleted, qib_statnames[] in qib_fs.c must
 * change to match.
 */
struct qlogic_ib_stats {
	__u64 sps_ints; /* number of interrupts handled */
	__u64 sps_errints; /* number of error interrupts */
	__u64 sps_txerrs; /* tx-related packet errors */
	__u64 sps_rcverrs; /* non-crc rcv packet errors */
	__u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
	__u64 sps_nopiobufs; /* no pio bufs avail from kernel */
	__u64 sps_ctxts; /* number of contexts currently open */
	__u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
	__u64 sps_buffull; /* presumably: rcv eager buffer full events -- verify */
	__u64 sps_hdrfull; /* presumably: rcv header queue full events -- verify */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
/* driver-wide stats instance; defined in a driver .c file */
extern struct qlogic_ib_stats qib_stats;
/* PCI error recovery callbacks registered with the PCI core */
extern const struct pci_error_handlers qib_pci_err_handler;

#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
/*
 * First-cut criterion for "device is active" is
 * two thousand dwords combined Tx, Rx traffic per
 * 5-second interval. SMA packets are 64 dwords,
 * and occur "a few per second", presumably each way.
 */
#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
/*
 * Below contains all data related to a single context (formerly called port).
 */

#ifdef CONFIG_DEBUG_FS
/* forward declaration; full definition lives with the debugfs support code */
struct qib_opcode_stats_perctx;
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 
/* all state for a single receive context (formerly called a port) */
struct qib_ctxtdata {
	/* array of eager receive buffer chunks (see rcvegrbuf_chunks) */
	void **rcvegrbuf;
	/* DMA addresses for the chunks in rcvegrbuf */
	dma_addr_t *rcvegrbuf_phys;
	/* rcvhdrq base, needs mmap before useful */
	void *rcvhdrq;
	/* kernel virtual address where hdrqtail is updated */
	void *rcvhdrtail_kvaddr;
	/*
	 * temp buffer for expected send setup, allocated at open, instead
	 * of each setup call
	 */
	void *tid_pg_list;
	/*
	 * Shared page for kernel to signal user processes that send buffers
	 * need disarming.  The process should call QIB_CMD_DISARM_BUFS
	 * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
	 */
	unsigned long *user_event_mask;
	/* when waiting for rcv or pioavail */
	wait_queue_head_t wait;
	/*
	 * rcvegr bufs base, physical; must fit in 44 bits
	 * (so 32-bit programs' mmap64 of 44-bit addresses works)
	 */
	dma_addr_t rcvegr_phys;
	/* mmap of hdrq, must fit in 44 bits */
	dma_addr_t rcvhdrq_phys;
	/* DMA address of the rcvhdrq tail-update location */
	dma_addr_t rcvhdrqtailaddr_phys;

	/*
	 * number of opens (including slave sub-contexts) on this instance
	 * (ignoring forks, dup, etc. for now)
	 */
	int cnt;
	/*
	 * how much space to leave at start of eager TID entries for
	 * protocol use, on each TID
	 */
	/*
	 * NOTE(review): the comment above appears to describe a field that
	 * is no longer present here -- verify against history.
	 */
	/* instead of calculating it */
	unsigned ctxt;
	/* local node of context */
	int node_id;
	/* non-zero if ctxt is being shared. */
	u16 subctxt_cnt;
	/*
	 * id of this subctxt within a shared ctxt (presumed; original
	 * comment was a duplicate of subctxt_cnt's -- verify)
	 */
	u16 subctxt_id;
	/* number of eager TID entries. */
	u16 rcvegrcnt;
	/* index of first eager TID entry. */
	u16 rcvegr_tid_base;
	/* number of pio bufs for this ctxt (all procs, if shared) */
	u32 piocnt;
	/* first pio buffer for this ctxt */
	u32 pio_base;
	/* chip offset of PIO buffers for this ctxt */
	u32 piobufs;
	/* how many alloc_pages() chunks in rcvegrbuf_pages */
	u32 rcvegrbuf_chunks;
	/* how many egrbufs per chunk */
	u16 rcvegrbufs_perchunk;
	/* ilog2 of above */
	u16 rcvegrbufs_perchunk_shift;
	/* order for rcvegrbuf_pages */
	size_t rcvegrbuf_size;
	/* rcvhdrq size (for freeing) */
	size_t rcvhdrq_size;
	/* per-context flags for fileops/intr communication */
	unsigned long flag;
	/* next expected TID to check when looking for free */
	u32 tidcursor;
	/* WAIT_RCV that timed out, no interrupt */
	u32 rcvwait_to;
	/* WAIT_PIO that timed out, no interrupt */
	u32 piowait_to;
	/* WAIT_RCV already happened, no wait */
	u32 rcvnowait;
	/* WAIT_PIO already happened, no wait */
	u32 pionowait;
	/* total number of polled urgent packets */
	u32 urgent;
	/* saved total number of polled urgent packets for poll edge trigger */
	u32 urgent_poll;
	/* pid of process using this ctxt */
	pid_t pid;
	/* pids of the subcontext slave processes (when shared) */
	pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
	/* same size as task_struct .comm[], command that opened context */
	char comm[16];
	/* pkeys set by this use of this ctxt */
	u16 pkeys[4];
	/* so file ops can get at unit */
	struct qib_devdata *dd;
	/* so funcs that need physical port can get it easily */
	struct qib_pportdata *ppd;
	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
	void *subctxt_uregbase;
	/* An array of pages for the eager receive buffers * N */
	void *subctxt_rcvegrbuf;
	/* An array of pages for the eager header queue entries * N */
	void *subctxt_rcvhdr_base;
	/* The version of the library which opened this ctxt */
	u32 userversion;
	/* Bitmask of active slaves */
	u32 active_slaves;
	/* Type of packets or conditions we want to poll for */
	u16 poll_type;
	/* receive packet sequence counter */
	u8 seq_cnt;
	/* presumably a sequence counter for redirected packets -- verify */
	u8 redirect_seq_cnt;
	/* ctxt rcvhdrq head offset */
	u32 head;
	/* QPs waiting for context processing */
	struct list_head qp_wait_list;
#ifdef CONFIG_DEBUG_FS
	/* verbs stats per CTX */
	struct qib_opcode_stats_perctx *opstats;
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
struct rvt_sge_state;

/*
 * A single SDMA transmit request, embedded in larger structures
 * (e.g. struct qib_verbs_txreq below) and queued to the SDMA engine.
 */
struct qib_sdma_txreq {
	int                 flags;      /* QIB_SDMA_TXREQ_F_* bits */
	int                 sg_count;
	dma_addr_t          addr;
	/* completion callback; int arg presumably a QIB_SDMA_TXREQ_S_* code -- verify */
	void              (*callback)(struct qib_sdma_txreq *, int);
	u16                 start_idx;  /* sdma private */
	u16                 next_descq_idx;  /* sdma private */
	struct list_head    list;       /* sdma private */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
/* one hardware SDMA descriptor: two little-endian quadwords */
struct qib_sdma_desc {
	__le64 qw[2];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
/* a verbs-layer transmit request wrapping an SDMA request with QP state */
struct qib_verbs_txreq {
	struct qib_sdma_txreq   txreq;
	struct rvt_qp           *qp;
	struct rvt_swqe         *wqe;
	u32                     dwords;     /* payload length in dwords -- TODO confirm */
	u16                     hdr_dwords;
	u16                     hdr_inx;
	struct qib_pio_header	*align_buf;
	struct rvt_mregion	*mr;
	struct rvt_sge_state    *ss;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
/* bit flags for qib_sdma_txreq.flags; may be ORed together */
#define QIB_SDMA_TXREQ_F_USELARGEBUF  0x1
#define QIB_SDMA_TXREQ_F_HEADTOHOST   0x2
#define QIB_SDMA_TXREQ_F_INTREQ       0x4
#define QIB_SDMA_TXREQ_F_FREEBUF      0x8
#define QIB_SDMA_TXREQ_F_FREEDESC     0x10

/* status codes reported when an sdma txreq completes */
#define QIB_SDMA_TXREQ_S_OK        0
#define QIB_SDMA_TXREQ_S_SENDERROR 1
#define QIB_SDMA_TXREQ_S_ABORTED   2
#define QIB_SDMA_TXREQ_S_SHUTDOWN  3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 
/*
 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
 * Mostly for MADs that set or query link parameters, also ipath
 * config interfaces
 */
#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
/* NOTE: value 1 is unused here (presumably historical) -- verify before reusing */
#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
#define QIB_IB_CFG_SPD 5 /* current Link spd */
#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
#define QIB_IB_CFG_VL_HIGH_LIMIT 19
#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 
/*
 * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
 * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
 * QIB_IB_CFG_LINKDEFAULT cmd
 */
#define   IB_LINKCMD_DOWN   (0 << 16)
#define   IB_LINKCMD_ARMED  (1 << 16)
#define   IB_LINKCMD_ACTIVE (2 << 16)
#define   IB_LINKINITCMD_NOP     0
#define   IB_LINKINITCMD_POLL    1
#define   IB_LINKINITCMD_SLEEP   2
#define   IB_LINKINITCMD_DISABLE 3

/*
 * valid states passed to qib_set_linkstate() user call
 */
#define QIB_IB_LINKDOWN         0
#define QIB_IB_LINKARM          1
#define QIB_IB_LINKACTIVE       2
#define QIB_IB_LINKDOWN_ONLY    3
#define QIB_IB_LINKDOWN_SLEEP   4
#define QIB_IB_LINKDOWN_DISABLE 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
/*
 * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
 * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
 * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs.  They
 * are also the possible values for qib_link_speed_enabled and active
 * The values were chosen to match values used within the IB spec.
 */
#define QIB_IB_SDR 1
#define QIB_IB_DDR 2
#define QIB_IB_QDR 4

/* default IB MTU, in bytes */
#define QIB_DEFAULT_MTU 4096

/* max number of IB ports supported per HCA */
#define QIB_MAX_IB_PORTS 2

/*
 * Possible IB config parameters for f_get/set_ib_table()
 */
#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
/*
 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
 * these are bits so they can be combined, e.g.
 * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
 */
#define QIB_RCVCTRL_TAILUPD_ENB 0x01
#define QIB_RCVCTRL_TAILUPD_DIS 0x02
#define QIB_RCVCTRL_CTXT_ENB 0x04
#define QIB_RCVCTRL_CTXT_DIS 0x08
#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
#define QIB_RCVCTRL_PKEY_ENB 0x40  /* Note, default is enabled */
#define QIB_RCVCTRL_PKEY_DIS 0x80
#define QIB_RCVCTRL_BP_ENB 0x0100
#define QIB_RCVCTRL_BP_DIS 0x0200
#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800

/*
 * Possible "operations" for f_sendctrl(ppd, op, var)
 * these are bits so they can be combined, e.g.
 * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
 * Some operations (e.g. DISARM, ABORT) are known to
 * be "one-shot", so do not modify shadow.
 */
#define QIB_SENDCTRL_DISARM       (0x1000)
#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
	/* available (0x2000) */
#define QIB_SENDCTRL_AVAIL_DIS    (0x4000)
#define QIB_SENDCTRL_AVAIL_ENB    (0x8000)
#define QIB_SENDCTRL_AVAIL_BLIP  (0x10000)
#define QIB_SENDCTRL_SEND_DIS    (0x20000)
#define QIB_SENDCTRL_SEND_ENB    (0x40000)
#define QIB_SENDCTRL_FLUSH       (0x80000)
#define QIB_SENDCTRL_CLEAR      (0x100000)
#define QIB_SENDCTRL_DISARM_ALL (0x200000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380)  * These are the generic indices for requesting per-port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381)  * counter values via the f_portcntr function.  They
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382)  * are always returned as 64 bit values, although most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383)  * are 32 bit counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) /* send-related counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) #define QIBPORTCNTR_PKTSEND         0U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) #define QIBPORTCNTR_WORDSEND        1U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) #define QIBPORTCNTR_PSXMITDATA      2U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) #define QIBPORTCNTR_PSXMITPKTS      3U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) #define QIBPORTCNTR_PSXMITWAIT      4U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) #define QIBPORTCNTR_SENDSTALL       5U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) /* receive-related counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) #define QIBPORTCNTR_PKTRCV          6U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) #define QIBPORTCNTR_PSRCVDATA       7U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) #define QIBPORTCNTR_PSRCVPKTS       8U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) #define QIBPORTCNTR_RCVEBP          9U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) #define QIBPORTCNTR_RCVOVFL         10U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) #define QIBPORTCNTR_WORDRCV         11U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) /* IB link related error counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) #define QIBPORTCNTR_RXLOCALPHYERR   12U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) #define QIBPORTCNTR_RXVLERR         13U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) #define QIBPORTCNTR_ERRICRC         14U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) #define QIBPORTCNTR_ERRVCRC         15U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) #define QIBPORTCNTR_ERRLPCRC        16U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) #define QIBPORTCNTR_BADFORMAT       17U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) #define QIBPORTCNTR_ERR_RLEN        18U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) #define QIBPORTCNTR_IBSYMBOLERR     19U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) #define QIBPORTCNTR_INVALIDRLEN     20U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) #define QIBPORTCNTR_UNSUPVL         21U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) #define QIBPORTCNTR_EXCESSBUFOVFL   22U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) #define QIBPORTCNTR_ERRLINK         23U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) #define QIBPORTCNTR_IBLINKDOWN      24U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) #define QIBPORTCNTR_IBLINKERRRECOV  25U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) #define QIBPORTCNTR_LLI             26U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) /* other error counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) #define QIBPORTCNTR_RXDROPPKT       27U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) #define QIBPORTCNTR_VL15PKTDROP     28U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) #define QIBPORTCNTR_ERRPKEY         29U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) #define QIBPORTCNTR_KHDROVFL        30U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) /* sampling counters (these are actually control registers) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) #define QIBPORTCNTR_PSINTERVAL      31U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) #define QIBPORTCNTR_PSSTART         32U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) #define QIBPORTCNTR_PSSTAT          33U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) /* how often we check for packet activity for "power on hours (in seconds) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) #define ACTIVITY_TIMER 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) #define MAX_NAME_SIZE 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) #ifdef CONFIG_INFINIBAND_QIB_DCA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) struct qib_irq_notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) struct qib_msix_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	void *arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) #ifdef CONFIG_INFINIBAND_QIB_DCA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	int dca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	int rcv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	struct qib_irq_notify *notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	cpumask_var_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) /* Below is an opaque struct. Each chip (device) can maintain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445)  * private data needed for its operation, but not germane to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446)  * rest of the driver.  For convenience, we define another that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447)  * is chip-specific, per-port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) struct qib_chip_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) struct qib_chipport_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) enum qib_sdma_states {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	qib_sdma_state_s00_hw_down,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	qib_sdma_state_s10_hw_start_up_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	qib_sdma_state_s20_idle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	qib_sdma_state_s30_sw_clean_up_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	qib_sdma_state_s40_hw_clean_up_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	qib_sdma_state_s50_hw_halt_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	qib_sdma_state_s99_running,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) enum qib_sdma_events {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	qib_sdma_event_e00_go_hw_down,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	qib_sdma_event_e10_go_hw_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	qib_sdma_event_e20_hw_started,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	qib_sdma_event_e30_go_running,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	qib_sdma_event_e40_sw_cleaned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	qib_sdma_event_e50_hw_cleaned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	qib_sdma_event_e60_hw_halted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	qib_sdma_event_e70_go_idle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	qib_sdma_event_e7220_err_halted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	qib_sdma_event_e7322_err_halted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	qib_sdma_event_e90_timer_tick,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) struct sdma_set_state_action {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	unsigned op_enable:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	unsigned op_intenable:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	unsigned op_halt:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	unsigned op_drain:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	unsigned go_s99_running_tofalse:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	unsigned go_s99_running_totrue:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) struct qib_sdma_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	struct kref          kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	struct completion    comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	enum qib_sdma_states current_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	struct sdma_set_state_action *set_state_action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	unsigned             current_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	unsigned             go_s99_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	unsigned             first_sendbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	unsigned             last_sendbuf; /* really last +1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	/* debugging/devel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	enum qib_sdma_states previous_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	unsigned             previous_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	enum qib_sdma_events last_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) struct xmit_wait {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	struct timer_list timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	u64 counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	u8 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	struct cache {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		u64 psxmitdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		u64 psrcvdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		u64 psxmitpkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		u64 psrcvpkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 		u64 psxmitwait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	} counter_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514)  * The structure below encapsulates data relevant to a physical IB Port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515)  * Current chips support only one such port, but the separation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  * clarifies things a bit. Note that to conform to IB conventions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517)  * port-numbers are one-based. The first or only port is port1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) struct qib_pportdata {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	struct qib_ibport ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	struct qib_devdata *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	struct qib_chippport_specific *cpspec; /* chip-specific per-port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	struct kobject pport_kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	struct kobject pport_cc_kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	struct kobject sl2vl_kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	struct kobject diagc_kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	/* GUID for this interface, in network order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	__be64 guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	/* QIB_POLL, etc. link-state specific flags, per port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	u32 lflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	/* qib_lflags driver is waiting for */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	u32 state_wanted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	spinlock_t lflags_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	/* ref count for each pkey */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	atomic_t pkeyrefs[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	 * this address is mapped readonly into user processes so they can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	 * get status cheaply, whenever they want.  One qword of status per port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	u64 *statusp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	/* SendDMA related entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	/* read mostly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	struct qib_sdma_desc *sdma_descq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	struct workqueue_struct *qib_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	struct qib_sdma_state sdma_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	dma_addr_t       sdma_descq_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	dma_addr_t       sdma_head_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	u16                   sdma_descq_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	/* read/write using lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	spinlock_t            sdma_lock ____cacheline_aligned_in_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	struct list_head      sdma_activelist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	struct list_head      sdma_userpending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	u64                   sdma_descq_added;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	u64                   sdma_descq_removed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	u16                   sdma_descq_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	u16                   sdma_descq_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	u8                    sdma_generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	u8                    sdma_intrequest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	struct tasklet_struct sdma_sw_clean_up_task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		____cacheline_aligned_in_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	wait_queue_head_t state_wait; /* for state_wanted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	/* HoL blocking for SMP replies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	unsigned          hol_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	struct timer_list hol_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	 * Shadow copies of registers; size indicates read access size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	 * Most of them are readonly, but some are write-only register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	 * where we manipulate the bits in the shadow copy, and then write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	 * the shadow copy to qlogic_ib.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	 * We deliberately make most of these 32 bits, since they have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	 * restricted range.  For any that we read, we won't to generate 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	 * bit accesses, since Opteron will generate 2 separate 32 bit HT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	 * transactions for a 64 bit read, and we want to avoid unnecessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	 * bus transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	/* This is the 64 bit group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	/* last ibcstatus.  opaque outside chip-specific code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	u64 lastibcstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	/* these are the "32 bit" regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	 * all expect bit fields to be "unsigned long"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	unsigned long p_sendctrl; /* shadow per-port sendctrl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	u32 ibmtu; /* The MTU programmed for this unit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	 * Current max size IB packet (in bytes) including IB headers, that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	 * we can send. Changes when ibmtu changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	u32 ibmaxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	 * ibmaxlen at init time, limited by chip and by receive buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	 * size.  Not changed after init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	u32 init_ibmaxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	/* LID programmed for this instance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	u16 lid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	/* list of pkeys programmed; 0 if not set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	u16 pkeys[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	/* LID mask control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	u8 lmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	u8 link_width_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	u16 link_speed_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	u8 link_width_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	u16 link_speed_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	u8 link_width_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	u16 link_speed_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	u8 vls_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	u8 vls_operational;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	/* Rx Polarity inversion (compensate for ~tx on partner) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	u8 rx_pol_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	u8 hw_pidx;     /* physical port index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	u8 port;        /* IB port number and index into dd->pports - 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	u8 delay_mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	/* used to override LED behavior */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	u8 led_override;  /* Substituted for normal value, if non-zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	u16 led_override_timeoff; /* delta to next timer event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	u8 led_override_vals[2]; /* Alternates per blink-frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	u8 led_override_phase; /* Just counts, LSB picks from vals[] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	atomic_t led_override_timer_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	/* Used to flash LEDs in override mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	struct timer_list led_override_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	struct xmit_wait cong_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	struct timer_list symerr_clear_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	/* Synchronize access between driver writes and sysfs reads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	spinlock_t cc_shadow_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		____cacheline_aligned_in_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	/* Shadow copy of the congestion control table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	struct cc_table_shadow *ccti_entries_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	/* Shadow copy of the congestion control entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	/* List of congestion control table entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	struct ib_cc_table_entry_shadow *ccti_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	/* 16 congestion entries with each entry corresponding to a SL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	struct ib_cc_congestion_entry_shadow *congestion_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	/* Maximum number of congestion control entries that the agent expects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	 * the manager to send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	u16 cc_supported_table_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	/* Total number of congestion control table entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	u16 total_cct_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	/* Bit map identifying service level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	u16 cc_sl_control_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	/* maximum congestion control table index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	u16 ccti_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	/* CA's max number of 64 entry units in the congestion control table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	u8 cc_max_table_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) /* Observers. Not to be taken lightly, possibly not to ship. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684)  * If a diag read or write is to (bottom <= offset <= top),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685)  * the "hoook" is called, allowing, e.g. shadows to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686)  * updated in sync with the driver. struct diag_observer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687)  * is the "visible" part.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) struct diag_observer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) typedef int (*diag_hook) (struct qib_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	const struct diag_observer *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	u32 offs, u64 *data, u64 mask, int only_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) struct diag_observer {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	diag_hook hook;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	u32 bottom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	u32 top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) extern int qib_register_observer(struct qib_devdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	const struct diag_observer *op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) /* Only declared here, not defined. Private to diags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) struct diag_observer_list_elt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) /* device data struct now contains only "general per-device" info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708)  * fields related to a physical IB port are in a qib_pportdata struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709)  * described above) while fields only used by a particular chip-type are in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710)  * a qib_chipdata struct, whose contents are opaque to this file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) struct qib_devdata {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	struct qib_ibdev verbs_dev;     /* must be first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	/* pointers to related structs for this device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	/* pci access data structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	struct pci_dev *pcidev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	struct cdev *user_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	struct cdev *diag_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	struct device *user_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	struct device *diag_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	/* mem-mapped pointer to base of chip regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	u64 __iomem *kregbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	/* end of mem-mapped chip space excluding sendbuf and user regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	u64 __iomem *kregend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	/* physical address of chip for io_remap, etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	resource_size_t physaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	/* qib_cfgctxts pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	struct qib_ctxtdata **rcd; /* Receive Context Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	/* qib_pportdata, points to array of (physical) port-specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	 * data structs, indexed by pidx (0..n-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	struct qib_pportdata *pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	struct qib_chip_specific *cspec; /* chip-specific */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	/* kvirt address of 1st 2k pio buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	void __iomem *pio2kbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	/* kvirt address of 1st 4k pio buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	void __iomem *pio4kbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	/* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	void __iomem *piobase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	/* mem-mapped pointer to base of user chip regs (if using WC PAT) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	u64 __iomem *userbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	void __iomem *piovl15base; /* base of VL15 buffers, if not WC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	 * points to area where PIOavail registers will be DMA'ed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	 * Has to be on a page of it's own, because the page will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	 * mapped into user program space.  This copy is *ONLY* ever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	 * written by DMA, not by the driver!  Need a copy per device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	 * when we get to multiple devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	/* physical address where updates occur */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	dma_addr_t pioavailregs_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	/* device-specific implementations of functions needed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	 * common code. Contrary to previous consensus, we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	 * really just point to a device-specific table, because we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	 * may need to "bend", e.g. *_f_put_tid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	/* fallback to alternate interrupt type if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	int (*f_intr_fallback)(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	/* hard reset chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	int (*f_reset)(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	void (*f_quiet_serdes)(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	int (*f_bringup_serdes)(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	int (*f_early_init)(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 				u32, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	void (*f_cleanup)(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	void (*f_setextled)(struct qib_pportdata *, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	/* fill out chip-specific fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	/* free irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	void (*f_free_irq)(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	struct qib_message_header *(*f_get_msgheader)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 					(struct qib_devdata *, __le32 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	void (*f_config_ctxts)(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	int (*f_get_ib_cfg)(struct qib_pportdata *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	u32 (*f_iblink_state)(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	u8 (*f_ibphys_portstate)(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	void (*f_xgxs_reset)(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	/* per chip actions needed for IB Link up/down changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	int (*f_ib_updown)(struct qib_pportdata *, int, u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	/* Read/modify/write of GPIO pins (potentially chip-specific */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		u32 mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	/* Enable writes to config EEPROM (if supported) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	 * modify rcvctrl shadow[s] and write to appropriate chip-regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	 * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	 * (ctxt == -1) means "all contexts", only meaningful for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	 * clearing. Could remove if chip_spec shutdown properly done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		int ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	/* Read/modify/write sendctrl appropriately for op and port. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	void (*f_sendctrl)(struct qib_pportdata *, u32 op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	void (*f_set_intr_state)(struct qib_devdata *, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	void (*f_set_armlaunch)(struct qib_devdata *, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	int (*f_late_initreg)(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	int (*f_init_sdma_regs)(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	u16 (*f_sdma_gethead)(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	int (*f_sdma_busy)(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	void (*f_sdma_init_early)(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	u64 (*f_portcntr)(struct qib_pportdata *, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		u64 **);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		char **, u64 **);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	void (*f_initvl15_bufs)(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	void (*f_init_ctxt)(struct qib_ctxtdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		struct qib_ctxtdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	void (*f_writescratch)(struct qib_devdata *, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) #ifdef CONFIG_INFINIBAND_QIB_DCA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	char *boardname; /* human readable board info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	/* template for writing TIDs  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	u64 tidtemplate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	/* value to write to free TIDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	u64 tidinvalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	/* number of registers used for pioavail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	u32 pioavregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	/* device (not port) flags, basically device capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	/* last buffer for user use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	u32 lastctxt_piobuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	/* reset value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	u64 z_int_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	/* percpu intcounter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	u64 __percpu *int_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	/* pio bufs allocated per ctxt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	u32 pbufsctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	/* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	u32 ctxts_extrabuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 * number of ctxts configured as max; zero is set to number chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 * supports, less gives more pio bufs/ctxt, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	u32 cfgctxts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	 * number of ctxts available for PSM open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	u32 freectxts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	 * hint that we should update pioavailshadow before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	 * looking for a PIO buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	u32 upd_pio_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	/* internal debugging stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	u32 maxpkts_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	u32 avgpkts_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	u64 nopiobufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	/* PCI Vendor ID (here for NodeInfo) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	u16 vendorid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	/* PCI Device ID (here for NodeInfo) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	u16 deviceid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	/* for write combining settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	int wc_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	unsigned long wc_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	unsigned long wc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	/* shadow copy of struct page *'s for exp tid pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	struct page **pageshadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	/* shadow copy of dma handles for exp tid pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	dma_addr_t *physshadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	u64 __iomem *egrtidbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	/* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	spinlock_t uctxt_lock; /* rcd and user context changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	 * per unit status, see also portdata statusp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 * mapped readonly into user processes so they can get unit and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	 * IB link status cheaply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	u64 *devstatusp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	char *freezemsg; /* freeze msg if hw error put chip in freeze */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	u32 freezelen; /* max length of freezemsg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	/* timer used to prevent stats overflow, error throttling, etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct timer_list stats_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	/* timer to verify interrupts work, and fallback if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	struct timer_list intrchk_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	unsigned long ureg_align; /* user register alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	 * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	 * pio_writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	spinlock_t pioavail_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	 * index of last buffer to optimize search for next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	u32 last_pio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	 * min kernel pio buffer to optimize search
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	u32 min_kernel_pio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 * Shadow copies of registers; size indicates read access size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	 * Most of them are readonly, but some are write-only register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	 * where we manipulate the bits in the shadow copy, and then write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	 * the shadow copy to qlogic_ib.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	 * We deliberately make most of these 32 bits, since they have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	 * restricted range.  For any that we read, we won't to generate 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	 * bit accesses, since Opteron will generate 2 separate 32 bit HT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 * transactions for a 64 bit read, and we want to avoid unnecessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	 * bus transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	/* This is the 64 bit group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	unsigned long pioavailshadow[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	/* bitmap of send buffers available for the kernel to use with PIO. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	unsigned long pioavailkernel[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	/* bitmap of send buffers which need to be disarmed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	unsigned long pio_need_disarm[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	/* bitmap of send buffers which are being written to. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	unsigned long pio_writing[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	/* kr_revision shadow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	u64 revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	/* Base GUID for device (from eeprom, network order) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	__be64 base_guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	 * kr_sendpiobufbase value (chip offset of pio buffers), and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	 * base of the 2KB buffer s(user processes only use 2K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	u64 piobufbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	u32 pio2k_bufbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	/* these are the "32 bit" regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	/* number of GUIDs in the flash for this interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	u32 nguid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * all expect bit fields to be "unsigned long"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	unsigned long rcvctrl; /* shadow per device rcvctrl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	unsigned long sendctrl; /* shadow per device sendctrl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	/* value we put in kr_rcvhdrcnt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	u32 rcvhdrcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	/* value we put in kr_rcvhdrsize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	u32 rcvhdrsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	/* value we put in kr_rcvhdrentsize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	u32 rcvhdrentsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	/* kr_ctxtcnt value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	u32 ctxtcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	/* kr_pagealign value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	u32 palign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	/* number of "2KB" PIO buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	u32 piobcnt2k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	/* size in bytes of "2KB" PIO buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	u32 piosize2k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	/* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	u32 piosize2kmax_dwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	/* number of "4KB" PIO buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	u32 piobcnt4k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	/* size in bytes of "4KB" PIO buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	u32 piosize4k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/* kr_rcvegrbase value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	u32 rcvegrbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	/* kr_rcvtidbase value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	u32 rcvtidbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	/* kr_rcvtidcnt value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	u32 rcvtidcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	/* kr_userregbase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	u32 uregbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	/* shadow the control register contents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	u32 control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/* chip address space used by 4k pio buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	u32 align4k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	/* size of each rcvegrbuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	u16 rcvegrbufsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	/* log2 of above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	u16 rcvegrbufsize_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	/* localbus width (1, 2,4,8,16,32) from config space  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	u32 lbus_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	/* localbus speed in MHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	u32 lbus_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	int unit; /* unit # of this chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	/* start of CHIP_SPEC move to chipspec, but need code changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	/* low and high portions of MSI capability/vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	u32 msi_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	/* saved after PCIe init for restore after reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	u32 msi_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	/* MSI data (vector) saved for restore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	u16 msi_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	/* so we can rewrite it after a chip reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	u32 pcibar0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	/* so we can rewrite it after a chip reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	u32 pcibar1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	u64 rhdrhead_intr_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	 * ASCII serial number, from flash, large enough for original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	 * all digit strings, and longer QLogic serial number format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	u8 serial[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	/* human readable board version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	u8 boardversion[96];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	u8 lbus_info[32]; /* human readable localbus info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	/* chip major rev, from qib_revision */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	u8 majrev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	/* chip minor rev, from qib_revision */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	u8 minrev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	/* Misc small ints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	/* Number of physical ports available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	u8 num_pports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	/* Lowest context number which can be used by user processes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	u8 first_user_ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	u8 n_krcv_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	u8 qpn_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	u8 skip_kctxt_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	u16 rhf_offset; /* offset of RHF within receive header entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	 * GPIO pins for twsi-connected devices, and device code for eeprom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	u8 gpio_sda_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	u8 gpio_scl_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	u8 twsi_eeprom_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	u8 board_atten;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	/* Support (including locks) for EEPROM logging of errors and time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	/* control access to actual counters, timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	spinlock_t eep_st_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	/* control high-level access to EEPROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	struct mutex eep_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	uint64_t traffic_wds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	struct qib_diag_client *diag_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	struct diag_observer_list_elt *diag_observer_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	u8 psxmitwait_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	/* cycle length of PS* counters in HW (in picoseconds) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	u16 psxmitwait_check_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	/* high volume overflow errors defered to tasklet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	struct tasklet_struct error_tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	int assigned_node_id; /* NUMA node closest to HCA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* hol_state values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) #define QIB_HOL_UP       0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) #define QIB_HOL_INIT     1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) #define QIB_SDMA_SENDCTRL_OP_ENABLE    (1U << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) #define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) #define QIB_SDMA_SENDCTRL_OP_HALT      (1U << 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) #define QIB_SDMA_SENDCTRL_OP_CLEANUP   (1U << 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #define QIB_SDMA_SENDCTRL_OP_DRAIN     (1U << 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* operation types for f_txchk_change() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) #define TXCHK_CHG_TYPE_DIS1  3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) #define TXCHK_CHG_TYPE_ENAB1 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) #define TXCHK_CHG_TYPE_KERN  1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) #define TXCHK_CHG_TYPE_USER  0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) #define QIB_CHASE_TIME msecs_to_jiffies(145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) #define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /* Private data for file operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct qib_filedata {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	struct qib_ctxtdata *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	unsigned subctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	unsigned tidcursor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	struct qib_user_sdma_queue *pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	int rec_cpu_num; /* for cpu affinity; -1 if none */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) extern struct xarray qib_dev_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) extern struct qib_devdata *qib_lookup(int unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) extern u32 qib_cpulist_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) extern unsigned long *qib_cpulist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) extern unsigned qib_cc_table_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) int qib_init(struct qib_devdata *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) int init_chip_wc_pat(struct qib_devdata *dd, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) int qib_enable_wc(struct qib_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) void qib_disable_wc(struct qib_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) int qib_count_units(int *npresentp, int *nupp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) int qib_count_active_units(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) int qib_cdev_init(int minor, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		  const struct file_operations *fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		  struct cdev **cdevp, struct device **devp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) int qib_dev_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) void qib_dev_cleanup(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) int qib_diag_add(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) void qib_diag_remove(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) void qib_bad_intrstatus(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) void qib_handle_urcv(struct qib_devdata *, u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /* clean up any per-chip chip-specific stuff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) void qib_chip_cleanup(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* clean up any chip type-specific stuff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) void qib_chip_done(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /* check to see if we have to force ordering for write combining */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) int qib_unordered_wc(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) void qib_pio_copy(void __iomem *to, const void *from, size_t count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) void qib_cancel_sends(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) int qib_setup_eagerbufs(struct qib_ctxtdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) void qib_set_ctxtcnt(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) int qib_create_ctxts(struct qib_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) int qib_reset_device(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) int qib_wait_linkstate(struct qib_pportdata *, u32, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int qib_set_linkstate(struct qib_pportdata *, u8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) int qib_set_mtu(struct qib_pportdata *, u16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) int qib_set_lid(struct qib_pportdata *, u32, u8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) void qib_hol_down(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) void qib_hol_init(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) void qib_hol_up(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) void qib_hol_event(struct timer_list *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) void qib_disable_after_error(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int qib_set_uevent_bits(struct qib_pportdata *, const int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /* for use in system calls, where we want to know device type, etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) #define ctxt_fp(fp) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	(((struct qib_filedata *)(fp)->private_data)->rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) #define subctxt_fp(fp) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	(((struct qib_filedata *)(fp)->private_data)->subctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) #define tidcursor_fp(fp) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	(((struct qib_filedata *)(fp)->private_data)->tidcursor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) #define user_sdma_queue_fp(fp) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	(((struct qib_filedata *)(fp)->private_data)->pq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	return ppd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	return container_of(dev, struct qib_devdata, verbs_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	return dd_from_dev(to_idev(ibdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	return container_of(ibp, struct qib_pportdata, ibport_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	WARN_ON(pidx >= dd->num_pports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	return &dd->pport[pidx].ibport_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * values for dd->flags (_device_ related flags) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) #define QIB_HAS_LINK_LATENCY  0x1 /* supports link latency (IB 1.2) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) #define QIB_INITTED           0x2 /* chip and driver up and initted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) #define QIB_DOING_RESET       0x4  /* in the middle of doing chip reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) #define QIB_PRESENT           0x8  /* chip accesses can be done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) #define QIB_PIO_FLUSH_WC      0x10 /* Needs Write combining flush for PIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) #define QIB_HAS_THRESH_UPDATE 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) #define QIB_HAS_SDMA_TIMEOUT  0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) #define QIB_USE_SPCL_TRIG     0x100 /* SpecialTrigger launch enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) #define QIB_NODMA_RTAIL       0x200 /* rcvhdrtail register DMA enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) #define QIB_HAS_INTX          0x800 /* Supports INTx interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) #define QIB_HAS_SEND_DMA      0x1000 /* Supports Send DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) #define QIB_HAS_VLSUPP        0x2000 /* Supports multiple VLs; PBC different */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) #define QIB_HAS_HDRSUPP       0x4000 /* Supports header suppression */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) #define QIB_BADINTR           0x8000 /* severe interrupt problems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) #define QIB_DCA_ENABLED       0x10000 /* Direct Cache Access enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) #define QIB_HAS_QSFP          0x20000 /* device (card instance) has QSFP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) #define QIB_SHUTDOWN          0x40000 /* device is shutting down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)  * values for ppd->lflags (_ib_port_ related flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) #define QIBL_LINKV             0x1 /* IB link state valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) #define QIBL_LINKDOWN          0x8 /* IB link is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) #define QIBL_LINKINIT          0x10 /* IB link level is up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) #define QIBL_LINKARMED         0x20 /* IB link is ARMED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) #define QIBL_LINKACTIVE        0x40 /* IB link is ACTIVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* leave a gap for more IB-link state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) #define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) #define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) #define QIBL_IB_LINK_DISABLED  0x4000 /* Linkdown-disable forced,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 				       * Do not try to bring up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) #define QIBL_IB_FORCE_NOTIFY   0x8000 /* force notify on next ib change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /* IB dword length mask in PBC (lower 11 bits); same for all chips */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) #define QIB_PBC_LENGTH_MASK                     ((1 << 11) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /* ctxt_flag bit offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		/* waiting for a packet to arrive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) #define QIB_CTXT_WAITING_RCV   2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		/* master has not finished initializing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) #define QIB_CTXT_MASTER_UNINIT 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		/* waiting for an urgent packet to arrive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) #define QIB_CTXT_WAITING_URG 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /* free up any allocated data at closes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) void qib_free_data(struct qib_ctxtdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			    u32, struct qib_ctxtdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 					   const struct pci_device_id *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 					   const struct pci_device_id *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 					   const struct pci_device_id *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) void qib_free_devdata(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) #define QIB_TWSI_NO_DEV 0xFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* Below qib_twsi_ functions must be called with eep_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) int qib_twsi_reset(struct qib_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		    int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		    const void *buffer, int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) void qib_get_eeprom_info(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) void qib_dump_lookup_output_queue(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) void qib_force_pio_avail_update(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) void qib_clear_symerror_on_linkup(struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  * Set LED override, only the two LSBs have "public" meaning, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * any non-zero value substitutes them for the Link and LinkTrain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  * LED states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) #define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) #define QIB_LED_LOG 2  /* Logical (link) YELLOW LED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /* send dma routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) int qib_setup_sdma(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) void qib_teardown_sdma(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) void __qib_sdma_intr(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) void qib_sdma_intr(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) void qib_user_sdma_send_desc(struct qib_pportdata *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			struct list_head *pktlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			u32, struct qib_verbs_txreq *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /* ppd->sdma_lock should be locked before calling this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) int qib_sdma_make_progress(struct qib_pportdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	return ppd->sdma_descq_added == ppd->sdma_descq_removed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /* must be called under qib_sdma_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	return ppd->sdma_descq_cnt -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		(ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static inline int __qib_sdma_running(struct qib_pportdata *ppd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) int qib_sdma_running(struct qib_pportdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) void dump_sdma_state(struct qib_pportdata *ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  * number of words used for protocol header if not set by qib_userinit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) #define QIB_DFLT_RCVHDRSIZE 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * We need to be able to handle an IB header of at least 24 dwords.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  * We need the rcvhdrq large enough to handle largest IB header, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  * still have room for a 2KB MTU standard IB packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * Additionally, some processor/memory controller combinations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * benefit quite strongly from having the DMA'ed data be cacheline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  * aligned and a cacheline multiple, so we set the size to 32 dwords
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  * (2 64-byte primary cachelines for pretty much all processors of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  * interest).  The alignment hurts nothing, other than using somewhat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  * more memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) #define QIB_RCVHDR_ENTSIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int qib_get_user_pages(unsigned long, size_t, struct page **);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) void qib_release_user_pages(struct page **, size_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) void qib_sendbuf_done(struct qib_devdata *, unsigned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	*((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 * volatile because it's a DMA target from the chip, routine is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	 * inlined, and don't want register caching or reordering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	return (u32) le64_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		*((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	const struct qib_devdata *dd = rcd->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	u32 hdrqtail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	if (dd->flags & QIB_NODMA_RTAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		__le32 *rhf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		u32 seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		rhf_addr = (__le32 *) rcd->rcvhdrq +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			rcd->head + dd->rhf_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		seq = qib_hdrget_seq(rhf_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		hdrqtail = rcd->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		if (seq == rcd->seq_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			hdrqtail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		hdrqtail = qib_get_rcvhdrtail(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	return hdrqtail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)  * sysfs interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) extern const char ib_qib_version[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) extern const struct attribute_group qib_attr_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) int qib_device_create(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) void qib_device_remove(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 			  struct kobject *kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) void qib_verbs_unregister_sysfs(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) /* Hook for sysfs read of QSFP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) int __init qib_init_qibfs(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) int __exit qib_exit_qibfs(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) int qibfs_add(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int qibfs_remove(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		    const struct pci_device_id *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) void qib_pcie_ddcleanup(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) void qib_free_irq(struct qib_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) int qib_reinit_intr(struct qib_devdata *dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /* interrupts for device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) u64 qib_int_counter(struct qib_devdata *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /* interrupt for all devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) u64 qib_sps_ints(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  * dma_addr wrappers - all 0's invalid for hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  * Flush write combining store buffers (if present) and perform a write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)  * barrier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static inline void qib_flush_wc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) #if defined(CONFIG_X86_64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	asm volatile("sfence" : : : "memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	wmb(); /* no reorder around wc flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) /* global module parameter variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) extern unsigned qib_ibmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) extern ushort qib_cfgctxts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) extern ushort qib_num_cfg_vls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) extern unsigned qib_n_krcv_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) extern unsigned qib_sdma_fetch_arb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) extern unsigned qib_compat_ddr_negotiate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) extern int qib_special_trigger;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) extern unsigned qib_numa_aware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) extern struct mutex qib_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /* Number of seconds before our card status check...  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) #define STATUS_TIMEOUT 60
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) #define QIB_DRV_NAME            "ib_qib"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) #define QIB_USER_MINOR_BASE     0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) #define QIB_TRACE_MINOR         127
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) #define QIB_DIAGPKT_MINOR       128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) #define QIB_DIAG_MINOR_BASE     129
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) #define QIB_NMINORS             255
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) #define PCI_VENDOR_ID_PATHSCALE 0x1fc1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) #define PCI_VENDOR_ID_QLOGIC 0x1077
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) #define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) #define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) #define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  * qib_early_err is used (only!) to print early errors before devdata is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)  * allocated, or when dd->pcidev may not be valid, and at the tail end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  * cleanup when devdata may have been freed, etc.  qib_dev_porterr is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)  * the same as qib_dev_err, but is used when the message really needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)  * the IB port# to be definitive as to what's happening..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)  * All of these go to the trace log, and the trace log entry is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)  * first to avoid possible serial port delays from printk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) #define qib_early_err(dev, fmt, ...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	dev_err(dev, fmt, ##__VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) #define qib_dev_err(dd, fmt, ...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) #define qib_dev_warn(dd, fmt, ...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) #define qib_dev_porterr(dd, port, fmt, ...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		##__VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) #define qib_devinfo(pcidev, fmt, ...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  * this is used for formatting hw error messages...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct qib_hwerror_msgs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	const char *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	size_t sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) #define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) /* in qib_intr.c... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) void qib_format_hwerrors(u64 hwerrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			 const struct qib_hwerror_msgs *hwerrmsgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			 size_t nhwerrmsgs, char *msg, size_t lmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) void qib_stop_send_queue(struct rvt_qp *qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) void qib_quiesce_qp(struct rvt_qp *qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) void qib_flush_qp_waiters(struct rvt_qp *qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) int qib_mtu_to_path_mtu(u32 mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) void qib_notify_error_qp(struct rvt_qp *qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			   struct ib_qp_attr *attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) #endif                          /* _QIB_KERNEL_H */