/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_

#include <linux/types.h>	/* u8, u16, __le16 */
#include <asm/barrier.h>	/* rmb() */
#include <asm/byteorder.h>	/* le16_to_cpu(), cpu_to_le16() */

/*
 * Completion queue descriptor types
 */
enum cq_desc_types {
	CQ_DESC_TYPE_WQ_ENET = 0,
	CQ_DESC_TYPE_DESC_COPY = 1,
	CQ_DESC_TYPE_WQ_EXCH = 2,
	CQ_DESC_TYPE_RQ_ENET = 3,
	CQ_DESC_TYPE_RQ_FCP = 4,
};

/* Completion queue descriptor: 16B
 *
 * All completion queues have this basic layout. The
 * type_specfic area is unique for each completion
 * queue type.
 */
struct cq_desc {
	__le16 completed_index;
	__le16 q_number;
	u8 type_specfic[11];
	u8 type_color;
};

#define CQ_DESC_TYPE_BITS        4
#define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK       1
#define CQ_DESC_COLOR_SHIFT      7
#define CQ_DESC_Q_NUM_BITS       10
#define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS    12
#define CQ_DESC_COMP_NDX_MASK    ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
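
/*
 * Illustrative sketch only (not part of the original driver interface):
 * shows how the masks above pack the 4-bit type into bits 3:0 and the
 * color bit into bit 7 of type_color, i.e. the inverse of cq_desc_dec()
 * below. The helper name cq_desc_enc_example is hypothetical; in the real
 * driver the hardware, not software, writes completion descriptors.
 */
static inline void cq_desc_enc_example(struct cq_desc *desc,
	u8 type, u8 color, u16 q_number, u16 completed_index)
{
	desc->type_color = (type & CQ_DESC_TYPE_MASK) |
		((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
	desc->q_number = cpu_to_le16(q_number & CQ_DESC_Q_NUM_MASK);
	desc->completed_index = cpu_to_le16(completed_index &
		CQ_DESC_COMP_NDX_MASK);
}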

static inline void cq_desc_dec(const struct cq_desc *desc_arg,
	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
	const struct cq_desc *desc = desc_arg;
	const u8 type_color = desc->type_color;

	*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;

	/*
	 * Make sure color bit is read from desc *before* other fields
	 * are read from desc. Hardware guarantees color bit is last
	 * bit (byte) written. Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */

	rmb();

	*type = type_color & CQ_DESC_TYPE_MASK;
	*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
	*completed_index = le16_to_cpu(desc->completed_index) &
		CQ_DESC_COMP_NDX_MASK;
}
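
/*
 * Minimal usage sketch (illustrative only): how a consumer might use
 * cq_desc_dec() together with the color bit to service a completion ring.
 * The struct example_cq and example_cq_service() below are hypothetical
 * bookkeeping for this example and are not part of this header; the real
 * driver keeps equivalent state in its own queue structures. A descriptor
 * is new when its color bit differs from the color seen on the previous
 * ring pass; the expected color toggles each time the ring wraps.
 */
struct example_cq {
	struct cq_desc *ring_base;	/* base of the descriptor ring */
	unsigned int ring_size;		/* number of descriptors in ring */
	unsigned int to_clean;		/* next index to service */
	u8 last_color;			/* color seen on the previous pass */
};

static inline unsigned int example_cq_service(struct example_cq *cq)
{
	struct cq_desc *desc = &cq->ring_base[cq->to_clean];
	u16 q_number, completed_index;
	unsigned int work_done = 0;
	u8 type, color;

	cq_desc_dec(desc, &type, &color, &q_number, &completed_index);

	/* A color mismatch means hardware has posted a new completion */
	while (color != cq->last_color) {
		/* ... handle (type, q_number, completed_index) here ... */
		work_done++;

		if (++cq->to_clean == cq->ring_size) {
			cq->to_clean = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		desc = &cq->ring_base[cq->to_clean];
		cq_desc_dec(desc, &type, &color, &q_number, &completed_index);
	}

	return work_done;
}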

#endif /* _CQ_DESC_H_ */