/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */
#ifndef _IBA_DEFS_H_
#define _IBA_DEFS_H_

#include <linux/kernel.h>
#include <linux/bitfield.h>
#include <asm/unaligned.h>

static inline u32 _iba_get8(const u8 *ptr)
{
	return *ptr;
}

static inline void _iba_set8(u8 *ptr, u32 mask, u32 prep_value)
{
	*ptr = (*ptr & ~mask) | prep_value;
}

static inline u16 _iba_get16(const __be16 *ptr)
{
	return be16_to_cpu(*ptr);
}

static inline void _iba_set16(__be16 *ptr, u16 mask, u16 prep_value)
{
	*ptr = cpu_to_be16((be16_to_cpu(*ptr) & ~mask) | prep_value);
}

static inline u32 _iba_get32(const __be32 *ptr)
{
	return be32_to_cpu(*ptr);
}

static inline void _iba_set32(__be32 *ptr, u32 mask, u32 prep_value)
{
	*ptr = cpu_to_be32((be32_to_cpu(*ptr) & ~mask) | prep_value);
}

static inline u64 _iba_get64(const __be64 *ptr)
{
	/*
	 * MADs are constructed so that 32-bit and smaller fields are
	 * naturally aligned; anything larger has a maximum alignment of
	 * 4 bytes.
	 */
	return be64_to_cpu(get_unaligned(ptr));
}

static inline void _iba_set64(__be64 *ptr, u64 mask, u64 prep_value)
{
	put_unaligned(cpu_to_be64((_iba_get64(ptr) & ~mask) | prep_value), ptr);
}

#define _IBA_SET(field_struct, field_offset, field_mask, num_bits, ptr, value) \
	({                                                                     \
		field_struct *_ptr = ptr;                                      \
		_iba_set##num_bits((void *)_ptr + (field_offset), field_mask,  \
				   FIELD_PREP(field_mask, value));             \
	})
#define IBA_SET(field, ptr, value) _IBA_SET(field, ptr, value)

#define _IBA_GET_MEM_PTR(field_struct, field_offset, type, num_bits, ptr)      \
	({                                                                     \
		field_struct *_ptr = ptr;                                      \
		(type *)((void *)_ptr + (field_offset));                       \
	})
#define IBA_GET_MEM_PTR(field, ptr) _IBA_GET_MEM_PTR(field, ptr)

/* FIXME: A set should always set the entire field, meaning we should zero the trailing bytes */
#define _IBA_SET_MEM(field_struct, field_offset, type, num_bits, ptr, in,      \
		     bytes)                                                    \
	({                                                                     \
		const type *_in_ptr = in;                                      \
		WARN_ON(bytes * 8 > num_bits);                                 \
		if (in && bytes)                                               \
			memcpy(_IBA_GET_MEM_PTR(field_struct, field_offset,    \
						type, num_bits, ptr),          \
			       _in_ptr, bytes);                                \
	})
#define IBA_SET_MEM(field, ptr, in, bytes) _IBA_SET_MEM(field, ptr, in, bytes)

#define _IBA_GET(field_struct, field_offset, field_mask, num_bits, ptr)        \
	({                                                                     \
		const field_struct *_ptr = ptr;                                \
		(u##num_bits) FIELD_GET(                                       \
			field_mask, _iba_get##num_bits((const void *)_ptr +    \
						       (field_offset)));       \
	})
#define IBA_GET(field, ptr) _IBA_GET(field, ptr)

#define _IBA_GET_MEM(field_struct, field_offset, type, num_bits, ptr, out,     \
		     bytes)                                                    \
	({                                                                     \
		type *_out_ptr = out;                                          \
		WARN_ON(bytes * 8 > num_bits);                                 \
		if (out && bytes)                                              \
			memcpy(_out_ptr,                                       \
			       _IBA_GET_MEM_PTR(field_struct, field_offset,    \
						type, num_bits, ptr),          \
			       bytes);                                         \
	})
#define IBA_GET_MEM(field, ptr, out, bytes) _IBA_GET_MEM(field, ptr, out, bytes)

/*
 * The generated list becomes the parameters to the macros, the order is:
 *  - struct this applies to
 *  - starting offset of the mask
 *  - GENMASK or GENMASK_ULL in CPU order
 *  - The width of data the mask operations should work on, in bits
 */
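
/*
 * Usage sketch (the struct and field names are hypothetical, purely for
 * illustration; real field definitions are built from the IBTA tables
 * using the *_LOC helpers below):
 *
 *	struct example_mad {
 *		u8 data[64];
 *	};
 *
 *	A 16-bit field occupying bytes 4-5 of the MAD:
 *	#define EXAMPLE_FIELD IBA_FIELD16_LOC(struct example_mad, 4, 16)
 *	which expands to: struct example_mad, 4, GENMASK(15, 0), 16
 *
 *	u16 val = IBA_GET(EXAMPLE_FIELD, mad);
 *	IBA_SET(EXAMPLE_FIELD, mad, val | 0x1);
 */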

/*
 * Extraction using a tabular description like table 106. bit_offset is from
 * the Byte[Bit] notation.
 */
#define IBA_FIELD_BLOC(field_struct, byte_offset, bit_offset, num_bits)        \
	field_struct, byte_offset,                                             \
		GENMASK(7 - (bit_offset), 7 - (bit_offset) - (num_bits - 1)),  \
		8
#define IBA_FIELD8_LOC(field_struct, byte_offset, num_bits)                    \
	IBA_FIELD_BLOC(field_struct, byte_offset, 0, num_bits)
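
/*
 * For instance (hypothetical field), a 3-bit field at Byte 1, Bit 2:
 *	IBA_FIELD_BLOC(struct example_mad, 1, 2, 3)
 * expands to:
 *	struct example_mad, 1, GENMASK(5, 3), 8
 * i.e. bits 5..3 of the byte at offset 1, because Bit 0 in the Byte[Bit]
 * notation is the most significant bit of the byte.
 */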

#define IBA_FIELD16_LOC(field_struct, byte_offset, num_bits)                   \
	field_struct, (byte_offset)&0xFFFE,                                    \
		GENMASK(15 - (((byte_offset) % 2) * 8),                        \
			15 - (((byte_offset) % 2) * 8) - (num_bits - 1)),      \
		16

#define IBA_FIELD32_LOC(field_struct, byte_offset, num_bits)                   \
	field_struct, (byte_offset)&0xFFFC,                                    \
		GENMASK(31 - (((byte_offset) % 4) * 8),                        \
			31 - (((byte_offset) % 4) * 8) - (num_bits - 1)),      \
		32
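
/*
 * For instance (hypothetical field), a 16-bit quantity at byte offset 6:
 *	IBA_FIELD32_LOC(struct example_mad, 6, 16)
 * expands to:
 *	struct example_mad, 4, GENMASK(15, 0), 32
 * i.e. the offset is rounded down to the containing big-endian 32-bit word
 * and the mask selects its low 16 bits in CPU order, which are bytes 6-7
 * on the wire.
 */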

#define IBA_FIELD64_LOC(field_struct, byte_offset)                             \
	field_struct, byte_offset, GENMASK_ULL(63, 0), 64
/*
 * In the IBTA spec, everything larger than 64 bits is a whole number of
 * bytes, with no leftover bits.
 */
#define IBA_FIELD_MLOC(field_struct, byte_offset, num_bits, type)              \
	field_struct, byte_offset, type, num_bits
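
/*
 * Usage sketch (hypothetical field): a 128-bit GID at byte offset 32 would
 * be described as
 *	#define EXAMPLE_GID IBA_FIELD_MLOC(struct example_mad, 32, 128, union ib_gid)
 * and accessed either in place or by copy:
 *	union ib_gid *gid = IBA_GET_MEM_PTR(EXAMPLE_GID, mad);
 *	IBA_SET_MEM(EXAMPLE_GID, mad, &new_gid, sizeof(new_gid));
 *	IBA_GET_MEM(EXAMPLE_GID, mad, &out_gid, sizeof(out_gid));
 */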

#endif /* _IBA_DEFS_H_ */