/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H

#include <linux/bitops.h>
#include <linux/device.h>

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0		0
#define QMAN_CHANNEL_POOL1		0x21
#define QMAN_CHANNEL_CAAM		0x80
#define QMAN_CHANNEL_POOL1_REV3		0x401
#define QMAN_CHANNEL_CAAM_REV3		0x840
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
			 QM_PIRQ_MRI)

/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}
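
/*
 * Worked example (illustrative only): on a rev3 device, where
 * qm_channel_pool1 is QMAN_CHANNEL_POOL1_REV3 (0x401), converting pool
 * channel 0x402 yields QM_SDQCR_CHANNELS_POOL(2), ie. 0x00002000.
 */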

/* --- QMan data structures (and associated constants) --- */

/* "Frame Descriptor (FD)" */
struct qm_fd {
	union {
		struct {
			u8 cfg8b_w1;
			u8 bpid;	/* Buffer Pool ID */
			u8 cfg8b_w3;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			__be32 addr_lo;	/* low 32-bits of 40-bit address */
		} __packed;
		__be64 data;
	};
	__be32 cfg;	/* format, offset, length / congestion */
	union {
		__be32 cmd;
		__be32 status;
	};
} __aligned(8);

#define QM_FD_FORMAT_SG		BIT(31)
#define QM_FD_FORMAT_LONG	BIT(30)
#define QM_FD_FORMAT_COMPOUND	BIT(29)
#define QM_FD_FORMAT_MASK	GENMASK(31, 29)
#define QM_FD_OFF_SHIFT		20
#define QM_FD_OFF_MASK		GENMASK(28, 20)
#define QM_FD_LEN_MASK		GENMASK(19, 0)
#define QM_FD_LEN_BIG_MASK	GENMASK(28, 0)

enum qm_fd_format {
	/*
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, ie. a "congestion
	 * weight".
	 */
	qm_fd_contig = 0,
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
{
	fd->addr_hi = upper_32_bits(addr);
	fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

/*
 * The 'format' field indicates the interpretation of the remaining
 * 29 bits of the 32-bit word.
 * If 'format' is _contig or _sg, 20b length and 9b offset.
 * If 'format' is _contig_big or _sg_big, 29b length.
 * If 'format' is _compound, 29b "congestion weight".
 */
static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
}

static inline int qm_fd_get_offset(const struct qm_fd *fd)
{
	return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
}

static inline int qm_fd_get_length(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
}

static inline int qm_fd_get_len_big(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
}

static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
				   int off, int len)
{
	fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
			      ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
}

#define qm_fd_set_contig(fd, off, len) \
	qm_fd_set_param(fd, qm_fd_contig, off, len)
#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
#define qm_fd_set_contig_big(fd, len) \
	qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
#define qm_fd_set_compound(fd, len) qm_fd_set_param(fd, qm_fd_compound, 0, len)

static inline void qm_fd_clear_fd(struct qm_fd *fd)
{
	fd->data = 0;
	fd->cfg = 0;
	fd->cmd = 0;
}
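
/*
 * Minimal usage sketch (illustrative, not part of this API): build an FD for
 * a contiguous frame whose buffer has already been DMA-mapped; 'dma_addr',
 * 'headroom' and 'frame_len' are assumed to be provided by the caller.
 *
 *	struct qm_fd fd;
 *
 *	qm_fd_clear_fd(&fd);
 *	qm_fd_addr_set64(&fd, dma_addr);
 *	qm_fd_set_contig(&fd, headroom, frame_len);
 */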

/* Scatter/Gather table entry */
struct qm_sg_entry {
	union {
		struct {
			u8 __reserved1[3];
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			__be32 addr_lo;	/* low 32-bits of 40-bit address */
		};
		__be64 data;
	};
	__be32 cfg;	/* E bit, F bit, length */
	u8 __reserved2;
	u8 bpid;
	__be16 offset;	/* 13-bit, _res[13-15] */
} __packed;

#define QM_SG_LEN_MASK	GENMASK(29, 0)
#define QM_SG_OFF_MASK	GENMASK(12, 0)
#define QM_SG_FIN	BIT(30)
#define QM_SG_EXT	BIT(31)

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
{
	sg->addr_hi = upper_32_bits(addr);
	sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_FIN;
}

static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_EXT;
}

static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
}

static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
{
	sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
}

static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
{
	sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
}

static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
{
	return be16_to_cpu(sg->offset) & QM_SG_OFF_MASK;
}
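
/*
 * Minimal usage sketch (illustrative): fill a two-entry S/G table; 'sgt' is
 * assumed to point at DMA-visible memory, and buf0/buf1/len0/len1 to be
 * DMA-mapped buffer addresses and lengths supplied by the caller.
 *
 *	struct qm_sg_entry *sgt = ...;
 *
 *	memset(sgt, 0, 2 * sizeof(*sgt));
 *	qm_sg_entry_set64(&sgt[0], buf0);
 *	qm_sg_entry_set_len(&sgt[0], len0);
 *	qm_sg_entry_set64(&sgt[1], buf1);
 *	qm_sg_entry_set_f(&sgt[1], len1);	(final entry: sets the F bit)
 */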

/* "Frame Dequeue Response" */
struct qm_dqrr_entry {
	u8 verb;
	u8 stat;
	__be16 seqnum;	/* 15-bit */
	u8 tok;
	u8 __reserved2[3];
	__be32 fqid;	/* 24-bit */
	__be32 context_b;
	struct qm_fd fd;
	u8 __reserved4[32];
} __packed __aligned(64);
#define QM_DQRR_VERB_VBIT		0x80
#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired */

/* 'fqid' is a 24-bit field in every h/w descriptor */
#define QM_FQID_MASK	GENMASK(23, 0)
#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
#define qm_fqid_get(p)    (be32_to_cpu((p)->fqid) & QM_FQID_MASK)

/* "ERN Message Response" */
/* "FQ State Change Notification" */
union qm_mr_entry {
	struct {
		u8 verb;
		u8 __reserved[63];
	};
	struct {
		u8 verb;
		u8 dca;
		__be16 seqnum;
		u8 rc;		/* Rej Code: 8-bit */
		u8 __reserved[3];
		__be32 fqid;	/* 24-bit */
		__be32 tag;
		struct qm_fd fd;
		u8 __reserved1[32];
	} __packed __aligned(64) ern;
	struct {
		u8 verb;
		u8 fqs;		/* Frame Queue Status */
		u8 __reserved1[6];
		__be32 fqid;	/* 24-bit */
		__be32 context_b;
		u8 __reserved2[48];
	} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
};
#define QM_MR_VERB_VBIT			0x80
/*
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
 */
#define QM_MR_VERB_TYPE_MASK		0x27
#define QM_MR_VERB_DC_ERN		0x20
#define QM_MR_VERB_FQRN			0x21
#define QM_MR_VERB_FQRNI		0x22
#define QM_MR_VERB_FQRL			0x23
#define QM_MR_VERB_FQPN			0x24
#define QM_MR_RC_MASK			0xf0	/* contains one of; */
#define QM_MR_RC_CGR_TAILDROP		0x00
#define QM_MR_RC_WRED			0x10
#define QM_MR_RC_ERROR			0x20
#define QM_MR_RC_ORPWINDOW_EARLY	0x30
#define QM_MR_RC_ORPWINDOW_LATE		0x40
#define QM_MR_RC_FQ_TAILDROP		0x50
#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
#define QM_MR_RC_ORP_ZERO		0x70
#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */

/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
	u8 exclusive;
	/* Numbers of cachelines */
	u8 cl;	/* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
};

struct qm_fqd_oac {
	/* "Overhead Accounting Control", see QM_OAC_<...> */
	u8 oac;	/* oac[6-7], _res[0-5] */
	/* Two's-complement value (-128 to +127) */
	s8 oal;	/* "Overhead Accounting Length" */
};

struct qm_fqd {
	/* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
	u8 orpc;
	u8 cgid;
	__be16 fq_ctrl;	/* See QM_FQCTRL_<...> */
	__be16 dest_wq;	/* channel[3-15], wq[0-2] */
	__be16 ics_cred; /* 15-bit */
	/*
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below)
	 * reflects the Overhead ACcounting values.
	 */
	union {
		__be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
		struct qm_fqd_oac oac_init;
	};
	__be32 context_b;
	union {
		/* Treat it as 64-bit opaque */
		__be64 opaque;
		struct {
			__be32 hi;
			__be32 lo;
		};
		/* Treat it as s/w portal stashing config */
		/* see "FQD Context_A field used for [...]" */
		struct {
			struct qm_fqd_stashing stashing;
			/*
			 * 48-bit address of FQ context to
			 * stash, must be cacheline-aligned
			 */
			__be16 context_hi;
			__be32 context_lo;
		} __packed;
	} context_a;
	struct qm_fqd_oac oac_query;
} __packed;

#define QM_FQD_CHAN_OFF		3
#define QM_FQD_WQ_MASK		GENMASK(2, 0)
#define QM_FQD_TD_EXP_MASK	GENMASK(4, 0)
#define QM_FQD_TD_MANT_OFF	5
#define QM_FQD_TD_MANT_MASK	GENMASK(12, 5)
#define QM_FQD_TD_MAX		0xe0000000
#define QM_FQD_TD_MANT_MAX	0xff
#define QM_FQD_OAC_OFF		6
#define QM_FQD_AS_OFF		4
#define QM_FQD_DS_OFF		2
#define QM_FQD_XS_MASK		0x3

/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return qm_fqd_stashing_get64(fqd);
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
	fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
	fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
}

/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
				      int roundup)
{
	u32 e = 0;
	int td, oddbit = 0;

	if (val > QM_FQD_TD_MAX)
		return -ERANGE;

	while (val > QM_FQD_TD_MANT_MAX) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}

	td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
	td |= (e & QM_FQD_TD_EXP_MASK);
	fqd->td = cpu_to_be16(td);
	return 0;
}
/* and the other direction */
static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
{
	int td = be16_to_cpu(fqd->td);

	return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
		<< (td & QM_FQD_TD_EXP_MASK);
}
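
/*
 * Worked example (illustrative): qm_fqd_set_taildrop(&fqd, 20480, 1) halves
 * 20480 seven times to reach the 8-bit mantissa 160, so 'td' encodes
 * mant=160, exp=7 and qm_fqd_get_taildrop() returns 160 << 7 == 20480
 * exactly. Values that are not exactly representable are truncated, or
 * rounded up when 'roundup' is set.
 */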

static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
{
	struct qm_fqd_stashing *st = &fqd->context_a.stashing;

	st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
		 ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
		 (cs & QM_FQD_XS_MASK);
}
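
/*
 * Example (illustrative): qm_fqd_set_stashing(&fqd, 1, 1, 1) requests one
 * cacheline each of annotation ('as'), frame data ('ds') and FQ context
 * ('cs') stashing; each of the three fields is a 2-bit cacheline count.
 */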

static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
{
	return fqd->context_a.stashing.cl;
}

static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
{
	fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
}

static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
{
	fqd->oac_init.oal = val;
}

static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
{
	fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
				   (wq & QM_FQD_WQ_MASK));
}

static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
{
	return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
}

static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
{
	return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
}

/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...]" */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION	0x04
#define QM_STASHING_EXCL_DATA		0x02
#define QM_STASHING_EXCL_CTX		0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS	0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG	0x1 /* Accounting for Congestion Groups */

/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *   MaxP = 4 * (Pn + 1)
 */
struct qm_cgr_wr_parm {
	/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
	__be32 word;
};
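
/*
 * Worked example (illustrative): MA=64, Mn=10 encodes MaxTH = 64 * 2^10 =
 * 65536, counted in bytes or frames depending on the CGR 'mode' field.
 */
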
/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct qm_cgr_cs_thres {
	/* _res[13-15], TA[5-12], Tn[0-4] */
	__be16 word;
};
/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
 */
struct __qm_mc_cgr {
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
	u8 cscn_en;	/* boolean, use QM_CGR_EN */
	union {
		struct {
			__be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
			__be16 cscn_targ_dcp_low;
		};
		__be32 cscn_targ; /* use QM_CGR_TARG_* */
	};
	u8 cstd_en;	/* boolean, use QM_CGR_EN */
	u8 cs;		/* boolean, only used in query response */
	struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __packed;
#define QM_CGR_EN		0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit */
#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	int thres = be16_to_cpu(th->word);

	return ((u64)((thres >> 5) & 0xff)) << (thres & 0x1f);
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
					int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
	return 0;
}
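
/*
 * Worked example (illustrative): qm_cgr_cs_thres_set64(&th, 12288, 0) halves
 * 12288 six times down to the 8-bit mantissa 192, encoding TA=192, Tn=6;
 * qm_cgr_cs_thres_get64() then recovers 192 * 2^6 == 12288 exactly.
 */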

/* "Initialize FQ" */
struct qm_mcc_initfq {
	u8 __reserved1[2];
	__be16 we_mask;	/* Write Enable Mask */
	__be32 fqid;	/* 24-bit */
	__be16 count;	/* Initialises 'count+1' FQDs */
	struct qm_fqd fqd; /* the FQD fields go here */
	u8 __reserved2[30];
} __packed;
/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
	u8 __reserve1[2];
	__be16 we_mask;	/* Write Enable Mask */
	struct __qm_mc_cgr cgr;	/* CGR fields */
	u8 __reserved2[2];
	u8 cgid;
	u8 __reserved3[32];
} __packed;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC		0x0100
#define QM_INITFQ_WE_ORPC		0x0080
#define QM_INITFQ_WE_CGID		0x0040
#define QM_INITFQ_WE_FQCTRL		0x0020
#define QM_INITFQ_WE_DESTWQ		0x0010
#define QM_INITFQ_WE_ICSCRED		0x0008
#define QM_INITFQ_WE_TDTHRESH		0x0004
#define QM_INITFQ_WE_CONTEXTB		0x0002
#define QM_INITFQ_WE_CONTEXTA		0x0001
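
/*
 * Minimal usage sketch (illustrative): prepare INITFQ options that direct an
 * FQ to channel 'ch', work queue 3, with taildrop enabled at 64KiB; only the
 * FQD fields flagged in 'we_mask' are written by the command.
 *
 *	struct qm_mcc_initfq opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ |
 *				   QM_INITFQ_WE_FQCTRL |
 *				   QM_INITFQ_WE_TDTHRESH);
 *	qm_fqd_set_destwq(&opts.fqd, ch, 3);
 *	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
 *	qm_fqd_set_taildrop(&opts.fqd, 0x10000, 1);
 */
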
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G		0x0400
#define QM_CGR_WE_WR_PARM_Y		0x0200
#define QM_CGR_WE_WR_PARM_R		0x0100
#define QM_CGR_WE_WR_EN_G		0x0080
#define QM_CGR_WE_WR_EN_Y		0x0040
#define QM_CGR_WE_WR_EN_R		0x0020
#define QM_CGR_WE_CSCN_EN		0x0010
#define QM_CGR_WE_CSCN_TARG		0x0008
#define QM_CGR_WE_CSTD_EN		0x0004
#define QM_CGR_WE_CS_THRES		0x0002
#define QM_CGR_WE_MODE			0x0001

#define QMAN_CGR_FLAG_USE_INIT		0x00000001
#define QMAN_CGR_MODE_FRAME		0x00000001
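
/*
 * Minimal usage sketch (illustrative): prepare INITCGR options that enable
 * congestion state change notifications at a threshold of 12288 frames; as
 * with INITFQ, only fields flagged in 'we_mask' take effect.
 *
 *	struct qm_mcc_initcgr opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
 *				   QM_CGR_WE_MODE);
 *	opts.cgr.cscn_en = QM_CGR_EN;
 *	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
 *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 12288, 0);
 */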

/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD), it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;

/*
 * This object type represents a QMan congestion group, it is defined further
 * down.
 */
struct qman_cgr;

/*
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
 */
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	qman_cb_dqrr_park,
	/* Does not consume, for DCA mode only. */
	qman_cb_dqrr_defer,
	/*
	 * Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	 */
	qman_cb_dqrr_stop,
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop
};
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr);
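
/*
 * Minimal callback sketch (illustrative): consume each dequeued frame after
 * extracting its FD; 'process_frame' is a placeholder for whatever the owning
 * driver's receive path does with the buffer.
 *
 *	static enum qman_cb_dqrr_result
 *	rx_cb(struct qman_portal *qm, struct qman_fq *fq,
 *	      const struct qm_dqrr_entry *dqrr)
 *	{
 *		const struct qm_fd *fd = &dqrr->fd;
 *
 *		process_frame(qm_fd_addr(fd), qm_fd_get_length(fd));
 *		return qman_cb_dqrr_consume;
 *	}
 */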
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * are always consumed after the callback returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) const union qm_mr_entry *msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * held-active + held-suspended are just "sched". Things like "retired" will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * then, to indicate it's completing and to gate attempts to retry the retire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) * index rather than the FQ that ring entry corresponds to), so repeated park
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) * commands are allowed (if you're silly enough to try) but won't change FQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) * state, and the resulting park notifications move FQs from "sched" to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * "parked".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) enum qman_fq_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) qman_fq_state_oos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) qman_fq_state_parked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) qman_fq_state_sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) qman_fq_state_retired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) #define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) #define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) #define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) #define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) #define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) #define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * Frame queue objects (struct qman_fq) are stored within memory passed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * qman_create_fq(), as this allows stashing of caller-provided demux callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * they should;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * (a) extend the qman_fq structure with their state; eg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * // myfq is allocated and driver_fq callbacks filled in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * struct my_fq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * struct qman_fq base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * int an_extra_field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * [ ... add other fields to be associated with each FQ ...]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * } *myfq = some_my_fq_allocator();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * // in a dequeue callback, access extra fields from 'fq' via a cast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * struct my_fq *myfq = (struct my_fq *)fq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * do_something_with(myfq->an_extra_field);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * [...]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) *
 * (b) when and if configuring the FQ for context stashing, specify however
 * many cachelines are required to stash 'struct my_fq', to accelerate not
 * only the QMan driver but the callback as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) struct qman_fq_cb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) qman_cb_dqrr dqrr; /* for dequeued frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) qman_cb_mr ern; /* for s/w ERNs */
	qman_cb_mr fqs;		/* frame-queue state changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct qman_fq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* Caller of qman_create_fq() provides these demux callbacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) struct qman_fq_cb cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * These are internal to the driver, don't touch. In particular, they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * may change, be removed, or extended (so you shouldn't rely on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * sizeof(qman_fq) being a constant).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) u32 fqid, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) enum qman_fq_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int cgr_groupid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * This callback type is used when handling congestion group entry/exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) typedef void (*qman_cb_cgr)(struct qman_portal *qm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct qman_cgr *cgr, int congested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct qman_cgr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* Set these prior to qman_create_cgr() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) qman_cb_cgr cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /* These are private to the driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) u16 chan; /* portal channel this object is created on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct list_head node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Flags to qman_create_fq() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) #define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) #define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) #define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) #define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /* Flags to qman_init_fq() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) #define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) #define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
 * For qman_volatile_dequeue(): choose one PRECEDENCE; EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame count. Use
 * FQID(n) to fill in the frame queue ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) #define QM_VDQCR_PRECEDENCE_VDQCR 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) #define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) #define QM_VDQCR_EXACT 0x40000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) #define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) #define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) #define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) #define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) #define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) #define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) #define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /* "Query FQ Non-Programmable Fields" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct qm_mcr_queryfq_np {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) u8 verb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) u8 result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) u8 __reserved1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) u8 state; /* QM_MCR_NP_STATE_*** */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) u32 fqd_link; /* 24-bit, _res2[24-31] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u16 odp_seq; /* 14-bit, _res3[14-15] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) u16 orp_nesn; /* 14-bit, _res4[14-15] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) u16 orp_ea_hseq; /* 15-bit, _res5[15] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) u16 orp_ea_tseq; /* 15-bit, _res6[15] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) u8 __reserved2[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) u8 is; /* 1-bit, _res12[1-7] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) u16 ics_surp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) u32 byte_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) u32 frm_cnt; /* 24-bit, _res13[24-31] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) u32 __reserved3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) u16 __reserved4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) #define QM_MCR_NP_STATE_FE 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) #define QM_MCR_NP_STATE_R 0x08
#define QM_MCR_NP_STATE_MASK		0x07	/* reads FQD::STATE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) #define QM_MCR_NP_STATE_OOS 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) #define QM_MCR_NP_STATE_RETIRED 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) #define QM_MCR_NP_STATE_TEN_SCHED 0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) #define QM_MCR_NP_STATE_TRU_SCHED 0x03
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) #define QM_MCR_NP_STATE_PARKED 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) #define QM_MCR_NP_STATE_ACTIVE 0x05
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) #define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) #define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) #define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) #define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) #define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) enum qm_mcr_queryfq_np_masks {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) qm_mcr_fqd_link_mask = BIT(24) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) qm_mcr_odp_seq_mask = BIT(14) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) qm_mcr_orp_nesn_mask = BIT(14) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) qm_mcr_orp_ea_hseq_mask = BIT(15) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) qm_mcr_orp_ea_tseq_mask = BIT(15) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) qm_mcr_orp_ea_hptr_mask = BIT(24) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) qm_mcr_orp_ea_tptr_mask = BIT(24) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) qm_mcr_pfdr_hptr_mask = BIT(24) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) qm_mcr_pfdr_tptr_mask = BIT(24) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) qm_mcr_is_mask = BIT(1) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) qm_mcr_frm_cnt_mask = BIT(24) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) #define qm_mcr_np_get(np, field) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ((np)->field & (qm_mcr_##field##_mask))
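
/*
 * Example (a sketch, assuming the usual 0-on-success convention of this API):
 * read the masked 24-bit frame count from a query result filled in by
 * qman_query_fq_np(), which is declared later in this header.
 *
 *	struct qm_mcr_queryfq_np np;
 *
 *	if (!qman_query_fq_np(fq, &np)) {
 *		u32 frames = qm_mcr_np_get(&np, frm_cnt);
 *		u32 bytes = np.byte_cnt; // full 32 bits, no mask needed
 *		// ... use 'frames' and 'bytes' ...
 *	}
 */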
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /* Portal Management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /**
 * qman_p_irqsource_add - add processing sources to be interrupt-driven
 * @p: the portal whose interrupt sources are to be extended
 * @bits: bitmask of QM_PIRQ_**I processing sources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * Adds processing sources that should be interrupt-driven (rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * processed via qman_poll_***() functions).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) void qman_p_irqsource_add(struct qman_portal *p, u32 bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /**
 * qman_p_irqsource_remove - remove processing sources from being int-driven
 * @p: the portal whose interrupt sources are to be reduced
 * @bits: bitmask of QM_PIRQ_**I processing sources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * Removes processing sources from being interrupt-driven, so that they will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * instead be processed via qman_poll_***() functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * qman_affine_cpus - return a mask of cpus that have affine portals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) const cpumask_t *qman_affine_cpus(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /**
 * qman_affine_channel - return the channel ID of a portal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * @cpu: the cpu whose affine portal is the subject of the query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * If @cpu is -1, the affine portal for the current CPU will be used. It is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * bug to call this function for any value of @cpu (other than -1) that is not a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * member of the mask returned from qman_affine_cpus().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) u16 qman_affine_channel(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * qman_get_affine_portal - return the portal pointer affine to cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * @cpu: the cpu whose affine portal is the subject of the query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct qman_portal *qman_get_affine_portal(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * qman_start_using_portal - register a device link for the portal user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * @p: the portal that will be in use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * @dev: the device that will use the portal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) *
 * Makes sure that the devices that use the portal are unbound when the
 * portal is unbound.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) int qman_start_using_portal(struct qman_portal *p, struct device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /**
 * qman_p_poll_dqrr - process DQRR (fast-path) entries
 * @p: the portal whose DQRR is to be processed
 * @limit: the maximum number of DQRR entries to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * Use of this function requires that DQRR processing not be interrupt-driven.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * The return value represents the number of DQRR entries processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
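
/*
 * A minimal polling sketch (illustrative, error handling elided): switch DQRR
 * processing on the current cpu's affine portal from interrupts to polling,
 * then drain it in bounded batches. Assumes preemption is disabled so that
 * smp_processor_id() is stable, and that QM_PIRQ_DQRI (defined earlier in
 * this header) is the DQRR processing source.
 *
 *	struct qman_portal *p = qman_get_affine_portal(smp_processor_id());
 *
 *	qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
 *	while (qman_p_poll_dqrr(p, 16) > 0)
 *		; // keep polling until a batch comes back empty
 */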
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /**
 * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
 * @p: the portal whose SDQCR is to be modified
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * Adds a set of pool channels to the portal's static dequeue command register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * (SDQCR). The requested pools are limited to those the portal has dequeue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * access to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
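
/*
 * For instance (pool index illustrative), given an affine portal pointer 'p':
 *
 *	qman_p_static_dequeue_add(p, QM_SDQCR_CHANNELS_POOL(3));
 */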
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* FQ management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * qman_create_fq - Allocates a FQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * @flags: bit-mask of QMAN_FQ_FLAG_*** options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * @fq: memory for storing the 'fq', with callbacks filled in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * Creates a frame queue object for the given @fqid, unless the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * dynamically allocated (or the function fails if none are available). Once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * created, the caller should not touch the memory at 'fq' except as extended to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with; it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any context_b modifications requested in the
 * qman_init_fq() API, as this indicates the frame queue will be consumed by a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * software portals, the context_b field is controlled by the driver and can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * be modified by the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * qman_destroy_fq - Deallocates a FQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * @fq: the frame queue object to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * not deallocated but the caller regains ownership, to do with as desired. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * FQ must be in the 'out-of-service' or in the 'parked' state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) void qman_destroy_fq(struct qman_fq *fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * qman_fq_fqid - Queries the frame queue ID of a FQ object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * @fq: the frame queue object to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) u32 qman_fq_fqid(struct qman_fq *fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * @fq: the frame queue object to modify, must be 'parked' or new.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * @opts: the FQ-modification settings, as defined in the low-level API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * The @opts parameter comes from the low-level portal API. Select
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * rather than parked. NB, @opts can be NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) *
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * 1. the 'count' and 'fqid' fields are always ignored (this operation only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * affects one frame queue: @fq).
 * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
 * 'fqd' structure's 'context_b' field are sometimes overwritten:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * initialised to a value used by the driver for demux.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * - if context_b is initialised for demux, so is context_a in case stashing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * is requested (see item 4).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * objects.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * 'dest::channel' field will be overwritten to match the portal used to issue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * the command. If the WE_DESTWQ write-enable bit had already been set by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * caller, the channel workqueue will be left as-is, otherwise the write-enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * isn't set, the destination channel/workqueue fields and the write-enable bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * are left as-is.
 * 4. if the driver overwrites context_a/b for demux, then if
 * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite the
 * context_a.address fields and will leave the stashing fields provided by the
 * user alone; otherwise it will zero out the context_a.stashing fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
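
/*
 * A minimal lifecycle sketch (illustrative, error handling elided): create an
 * FQ object with a dynamically allocated FQID and schedule it with
 * driver-default settings. 'my_dqrr_cb' is a hypothetical dequeue callback
 * supplied by the caller.
 *
 *	static struct qman_fq my_fq = {
 *		.cb.dqrr = my_dqrr_cb,
 *	};
 *	int err;
 *
 *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq);
 *	if (!err)
 *		err = qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, NULL);
 */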
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * qman_schedule_fq - Schedules a FQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * @fq: the frame queue object to schedule, must be 'parked'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) *
 * Schedules the frame queue, which must be Parked; this takes it to the
 * Tentatively-Scheduled or Truly-Scheduled state, depending on its fill-level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) int qman_schedule_fq(struct qman_fq *fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * qman_retire_fq - Retires a FQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * @fq: the frame queue object to retire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) *
 * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
 * the retirement was started asynchronously, and a negative error code on
 * failure. When this function returns zero, @flags is set to indicate whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * the retired FQ is empty and/or whether it has any ORL fragments (to show up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * as ERNs). Otherwise the corresponding flags will be known when a subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * FQRN message shows up on the portal's message ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * Active state), the completion will be via the message ring as a FQRN - but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * the corresponding callback may occur before this function returns!! Ie. the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * caller should be prepared to accept the callback as the function is called,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * not only once it has returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) int qman_retire_fq(struct qman_fq *fq, u32 *flags);
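
/*
 * For example (a sketch, error handling elided), given a created 'fq': retire
 * it and, if retirement completed immediately with nothing blocking OOS (per
 * the QMAN_FQ_STATE_BLOCKOOS flags above), take it out of service with
 * qman_oos_fq(), declared just below.
 *
 *	u32 flags;
 *
 *	if (qman_retire_fq(fq, &flags) == 0 &&
 *	    !(flags & QMAN_FQ_STATE_BLOCKOOS))
 *		qman_oos_fq(fq);
 */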
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * qman_oos_fq - Puts a FQ "out of service"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * @fq: the frame queue object to be put out-of-service, must be 'retired'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) *
 * The frame queue must be retired and empty, and if any order restoration list
 * entries were released as ERNs at the time of retirement, they must all have
 * been consumed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) int qman_oos_fq(struct qman_fq *fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * qman_volatile_dequeue - Issue a volatile dequeue command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * @fq: the frame queue object to dequeue from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
 * the VDQCR is already in use; otherwise it returns non-zero for failure. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * the VDQCR command has finished executing (ie. once the callback for the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * DQRR entry resulting from the VDQCR command has been called). If not using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * the FINISH flag, completion can be determined either by detecting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * for the QMAN_FQ_STATE_VDQCR bit to disappear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
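
/*
 * For instance (frame count illustrative): synchronously pull up to 8 frames
 * from 'fq', composing @vdqcr from the QM_VDQCR_*** macros defined earlier
 * and blocking until the command has fully completed.
 *
 *	int err = qman_volatile_dequeue(fq,
 *					QMAN_VOLATILE_FLAG_WAIT |
 *					QMAN_VOLATILE_FLAG_FINISH,
 *					QM_VDQCR_PRECEDENCE_VDQCR |
 *					QM_VDQCR_NUMFRAMES_SET(8));
 */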
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * qman_enqueue - Enqueue a frame to a frame queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * @fq: the frame queue object to enqueue to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * @fd: a descriptor of the frame to be enqueued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) *
 * Fills an entry in the EQCR of the portal to enqueue the frame described by
 * @fd to the frame queue @fq. The descriptor details are copied from @fd to
 * the EQCR entry, and the 'pid' field is ignored. The return value is non-zero
 * on error, such as ring full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
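
/*
 * A common calling pattern (sketch; 'fd' is a frame descriptor prepared by
 * the caller): retry a bounded number of times, since a failure such as a
 * full EQCR may be transient.
 *
 *	int i, err;
 *
 *	for (i = 0; i < 100; i++) {
 *		err = qman_enqueue(fq, &fd);
 *		if (!err)
 *			break;
 *		cpu_relax(); // give the ring a chance to drain
 *	}
 */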
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * @result: is set by the API to the base FQID of the allocated range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * @count: the number of FQIDs required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * Returns 0 on success, or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) int qman_alloc_fqid_range(u32 *result, u32 count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) #define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * qman_release_fqid - Release the specified frame queue ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * @fqid: the FQID to be released back to the resource pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * This function can also be used to seed the allocator with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * FQID ranges that it can subsequently allocate from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * Returns 0 on success, or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) int qman_release_fqid(u32 fqid);
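
/*
 * For example (range illustrative): seed the allocator with FQIDs
 * 0x100..0x1ff so that later qman_alloc_fqid_range() calls can draw on them.
 *
 *	u32 fqid;
 *
 *	for (fqid = 0x100; fqid < 0x200; fqid++)
 *		qman_release_fqid(fqid);
 */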
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * qman_query_fq_np - Queries non-programmable FQD fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * @fq: the frame queue object to be queried
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * @np: storage for the queried FQD fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* Pool-channel management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * @result: is set by the API to the base pool-channel ID of the allocated range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * @count: the number of pool-channel IDs required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * Returns 0 on success, or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) int qman_alloc_pool_range(u32 *result, u32 count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) #define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * qman_release_pool - Release the specified pool-channel ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * @id: the pool-chan ID to be released back to the resource pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * This function can also be used to seed the allocator with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * pool-channel ID ranges that it can subsequently allocate from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * Returns 0 on success, or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) int qman_release_pool(u32 id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /* CGR management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * qman_create_cgr - Register a congestion group object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * @cgr: the 'cgr' object, with fields filled in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * @flags: QMAN_CGR_FLAG_* values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * @opts: optional state of CGR settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) *
 * Registers this object to receive congestion entry/exit callbacks on the
 * portal affine to the cpu on which this API is executed. If opts is
 * NULL then only the callback (cgr->cb) function is registered. If @flags
 * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
 * any unspecified parameters) will be used rather than a modify hw command
 * (which only modifies the specified parameters).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct qm_mcc_initcgr *opts);
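
/*
 * A registration sketch (illustrative, error handling elided): allocate a CGR
 * ID with qman_alloc_cgrid() (declared later in this header), then register a
 * congestion callback with hardware-reset defaults. 'my_cgr_cb' is a
 * hypothetical callback of type qman_cb_cgr.
 *
 *	struct qm_mcc_initcgr initcgr = {};
 *	struct qman_cgr cgr = { .cb = my_cgr_cb };
 *	int err;
 *
 *	err = qman_alloc_cgrid(&cgr.cgrid);
 *	if (!err)
 *		err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &initcgr);
 */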
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * qman_delete_cgr - Deregisters a congestion group object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * @cgr: the 'cgr' object to deregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * "Unplugs" this CGR object from the portal affine to the cpu on which this API
 * is executed. This must be executed on the same affine portal on which it was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int qman_delete_cgr(struct qman_cgr *cgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * @cgr: the 'cgr' object to deregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) *
 * This will select the proper CPU and run qman_delete_cgr() there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) void qman_delete_cgr_safe(struct qman_cgr *cgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * qman_query_cgr_congested - Queries CGR's congestion status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * @cgr: the 'cgr' object to query
 * @result: returns the CGR's congestion status, 1 (true) if congested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * @result: is set by the API to the base CGR ID of the allocated range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * @count: the number of CGR IDs required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * Returns 0 on success, or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) int qman_alloc_cgrid_range(u32 *result, u32 count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) #define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * qman_release_cgrid - Release the specified CGR ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * @id: the CGR ID to be released back to the resource pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * This function can also be used to seed the allocator with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * CGR ID ranges that it can subsequently allocate from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * Returns 0 on success, or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) int qman_release_cgrid(u32 id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * qman_is_probed - Check if qman is probed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) *
 * Returns 1 if the qman driver was successfully probed, -1 if the qman driver
 * failed to probe, or 0 if the qman driver has not been probed yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) int qman_is_probed(void);
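
/*
 * A typical caller pattern (sketch): defer a dependent device's probe until
 * the qman driver has itself probed.
 *
 *	int err = qman_is_probed();
 *
 *	if (!err)
 *		return -EPROBE_DEFER;
 *	if (err < 0)
 *		return -ENODEV;
 */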
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * qman_portals_probed - Check if all cpu bound qman portals are probed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) *
 * Returns 1 if all the required cpu-bound qman portals were successfully
 * probed, -1 if probe errors appeared, or 0 if the qman portals have not yet
 * finished probing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) int qman_portals_probed(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * qman_dqrr_get_ithresh - Get coalesce interrupt threshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * @portal: portal to get the value for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * @ithresh: threshold pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * qman_dqrr_set_ithresh - Set coalesce interrupt threshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * @portal: portal to set the new value on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * @ithresh: new threshold value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * Returns 0 on success, or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /**
 * qman_portal_get_iperiod - Get coalesce interrupt period
 * @portal: portal to get the value for
 * @iperiod: period pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /**
 * qman_portal_set_iperiod - Set coalesce interrupt period
 * @portal: portal to set the new value on
 * @iperiod: new period value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * Returns 0 on success, or a negative error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);
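
/*
 * For instance (factor illustrative), given a portal pointer 'p': coarsen
 * interrupt coalescing by doubling the portal's interrupt period.
 *
 *	u32 iperiod;
 *
 *	qman_portal_get_iperiod(p, &iperiod);
 *	qman_portal_set_iperiod(p, iperiod * 2);
 */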
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) #endif /* __FSL_QMAN_H */