Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * IBM Power Virtual Ethernet Device Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) IBM Corporation, 2003, 2010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Authors: Dave Larson <larson1@us.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *	    Santiago Leon <santil@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *	    Brian King <brking@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *	    Robert Jennings <rcj@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *	    Anton Blanchard <anton@au.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
#ifndef _IBMVETH_H
#define _IBMVETH_H

/* constants for H_MULTICAST_CTRL */
/* Modify/enable bit pairs: the "Modify" bit tells firmware the paired
 * setting is being changed; the "Enable" bit gives the new value. */
#define IbmVethMcastReceptionModifyBit     0x80000UL	/* change reception mode */
#define IbmVethMcastReceptionEnableBit     0x20000UL	/* new value: reception on */
#define IbmVethMcastFilterModifyBit        0x40000UL	/* change filtering mode */
#define IbmVethMcastFilterEnableBit        0x10000UL	/* new value: filtering on */

/* Convenience command words built from the bits above */
#define IbmVethMcastEnableRecv       (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
#define IbmVethMcastDisableRecv      (IbmVethMcastReceptionModifyBit)
#define IbmVethMcastEnableFiltering  (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
/* Filter-table manipulation sub-commands */
#define IbmVethMcastAddFilter        0x1UL
#define IbmVethMcastRemoveFilter     0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
/* Attribute bits for H_ILLAN_ATTRIBUTES (see h_illan_attributes() below) */
#define IBMVETH_ILLAN_LRG_SR_ENABLED	0x0000000000010000UL	/* large receive enabled */
#define IBMVETH_ILLAN_LRG_SND_SUPPORT	0x0000000000008000UL	/* firmware large send (TSO) support */
#define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000UL	/* checksum offload on padded packets */
#define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00UL	/* trunk adapter priority field */
#define IBMVETH_ILLAN_IPV6_TCP_CSUM		0x0000000000000004UL	/* IPv6 TCP checksum offload */
#define IBMVETH_ILLAN_IPV4_TCP_CSUM		0x0000000000000002UL	/* IPv4 TCP checksum offload */
#define IBMVETH_ILLAN_ACTIVE_TRUNK		0x0000000000000001UL	/* adapter is the active trunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
/* hcall macros
 *
 * Thin wrappers around the PAPR hypervisor calls used by this driver.
 * "ua" is the unit address of the virtual ethernet adapter; the hcalls
 * return a hypervisor status code (H_SUCCESS on success).
 */
/* Register the buffer list, receive queue and filter list with firmware */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
  plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)

/* Tear down the logical LAN registration for this adapter */
#define h_free_logical_lan(ua) \
  plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)

/* Hand one receive buffer (descriptor "buf") to firmware for RX */
#define h_add_logical_lan_buffer(ua, buf) \
  plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
/*
 * h_send_logical_lan - transmit a frame via the H_SEND_LOGICAL_LAN hcall.
 *
 * @unit_address:	unit address of the virtual adapter
 * @desc1..@desc6:	up to six buffer descriptors describing the frame
 * @corellator_in:	correlator value passed to firmware
 * @corellator_out:	receives the correlator firmware returns (retbuf[0])
 * @mss:		maximum segment size, only passed when large send is used
 * @large_send_support:	non-zero if firmware supports large send (TSO)
 *
 * plpar_hcall9() is variadic here: when large send is supported the hcall
 * takes @mss as a ninth argument; otherwise only eight arguments are
 * passed.  The argument count itself is part of the firmware interface,
 * which is why the two calls are spelled out separately.
 *
 * Returns the hypervisor status code (H_SUCCESS on success).
 *
 * NOTE(review): "corellator" is a long-standing misspelling of
 * "correlator" inherited from the upstream driver; kept for consistency.
 */
static inline long h_send_logical_lan(unsigned long unit_address,
		unsigned long desc1, unsigned long desc2, unsigned long desc3,
		unsigned long desc4, unsigned long desc5, unsigned long desc6,
		unsigned long corellator_in, unsigned long *corellator_out,
		unsigned long mss, unsigned long large_send_support)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	if (large_send_support)
		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
				  desc1, desc2, desc3, desc4, desc5, desc6,
				  corellator_in, mss);
	else
		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
				  desc1, desc2, desc3, desc4, desc5, desc6,
				  corellator_in);

	/* firmware hands back the correlator in the first return slot */
	*corellator_out = retbuf[0];

	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) static inline long h_illan_attributes(unsigned long unit_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 				      unsigned long reset_mask, unsigned long set_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 				      unsigned long *ret_attributes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 			 reset_mask, set_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	*ret_attributes = retbuf[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 
/* Manipulate multicast reception/filtering; "cmd" is built from the
 * IbmVethMcast* constants above, "mac" is the multicast address. */
#define h_multicast_ctrl(ua, cmd, mac) \
  plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)

/* Ask firmware to change the adapter's MAC address */
#define h_change_logical_lan_mac(ua, mac) \
  plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
#define IBMVETH_NUM_BUFF_POOLS 5
#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
#define IBMVETH_MIN_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)

/* Default configuration for the IBMVETH_NUM_BUFF_POOLS receive buffer
 * pools, indexed per pool.  NOTE(review): these are "static" in a header,
 * so each including .c file gets its own private copy -- this matches the
 * upstream driver, which includes this header from a single .c file. */
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
/* presumably the reduced counts used when firmware CMO (Cooperative
 * Memory Overcommit) is active -- confirm against ibmveth.c */
static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
static int pool_active[] = { 1, 1, 0, 0, 1};	/* pools enabled by default */

/* Sentinel for an unused slot in a pool's free_map */
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
/* One receive buffer pool: a set of same-sized buffers handed to firmware */
struct ibmveth_buff_pool {
    u32 size;		/* number of buffers in this pool */
    u32 index;		/* pool index within the adapter's rx_buff_pool[] */
    u32 buff_size;	/* byte size of each buffer in this pool */
    u32 threshold;	/* refill threshold -- TODO confirm semantics in ibmveth.c */
    atomic_t available;	/* buffers currently posted/available */
    u32 consumer_index;	/* next free_map slot to consume */
    u32 producer_index;	/* next free_map slot to produce into */
    u16 *free_map;	/* ring of free buffer indices; IBM_VETH_INVALID_MAP = empty */
    dma_addr_t *dma_addr;	/* per-buffer DMA addresses, parallel to skbuff[] */
    struct sk_buff **skbuff;	/* per-buffer skb pointers */
    int active;		/* non-zero if this pool is in use */
    struct kobject kobj;	/* sysfs object for per-pool tuning */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
/* The receive completion queue shared with firmware */
struct ibmveth_rx_q {
    u64        index;	/* current position in the queue */
    u64        num_slots;	/* total number of queue entries */
    u64        toggle;	/* expected toggle bit for valid entries */
    dma_addr_t queue_dma;	/* DMA address of the queue registered with firmware */
    u32        queue_len;	/* queue length in bytes */
    struct ibmveth_rx_q_entry *queue_addr;	/* kernel virtual address of the queue */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
/* Per-device driver state for one virtual ethernet adapter */
struct ibmveth_adapter {
    struct vio_dev *vdev;	/* underlying VIO bus device */
    struct net_device *netdev;	/* associated network device */
    struct napi_struct napi;	/* NAPI context for RX polling */
    unsigned int mcastFilterSize;	/* multicast filter table capacity */
    void * buffer_list_addr;	/* buffer list page registered with firmware */
    void * filter_list_addr;	/* filter list page registered with firmware */
    dma_addr_t buffer_list_dma;
    dma_addr_t filter_list_dma;
    struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
    struct ibmveth_rx_q rx_queue;
    int pool_config;	/* non-zero while pools are being reconfigured -- TODO confirm */
    int rx_csum;	/* RX checksum offload enabled */
    int large_send;	/* large send (TSO) enabled */
    bool is_active_trunk;	/* set from IBMVETH_ILLAN_ACTIVE_TRUNK */
    void *bounce_buffer;	/* fallback TX buffer when mapping fails -- TODO confirm */
    dma_addr_t bounce_buffer_dma;

    /* firmware capability words cached from H_ILLAN_ATTRIBUTES */
    u64 fw_ipv6_csum_support;
    u64 fw_ipv4_csum_support;
    u64 fw_large_send_support;
    /* adapter specific stats */
    u64 replenish_task_cycles;
    u64 replenish_no_mem;
    u64 replenish_add_buff_failure;
    u64 replenish_add_buff_success;
    u64 rx_invalid_buffer;
    u64 rx_no_buffer;
    u64 tx_map_failed;
    u64 tx_send_failed;
    u64 tx_large_packets;
    u64 rx_large_packets;
    /* Ethtool settings */
	u8 duplex;
	u32 speed;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
/*
 * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
 * so we don't need to byteswap the two elements. However since we use
 * a union (ibmveth_buf_desc) to convert from the struct to a u64 we
 * do end up with endian specific ordering of the elements and that
 * needs correcting.
 */
struct ibmveth_buf_desc_fields {
#ifdef __BIG_ENDIAN
	u32 flags_len;
	u32 address;
#else
	u32 address;
	u32 flags_len;
#endif
/* Bit layout of flags_len: top byte is flags, low 24 bits are the length */
#define IBMVETH_BUF_VALID	0x80000000	/* descriptor holds a valid buffer */
#define IBMVETH_BUF_TOGGLE	0x40000000
#define IBMVETH_BUF_LRG_SND     0x04000000	/* buffer is part of a large send */
#define IBMVETH_BUF_NO_CSUM	0x02000000	/* checksum not computed */
#define IBMVETH_BUF_CSUM_GOOD	0x01000000	/* checksum verified by firmware */
#define IBMVETH_BUF_LEN_MASK	0x00FFFFFF	/* buffer length in bytes */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 
/* View of a buffer descriptor as either a single u64 (for the hcall
 * register argument) or its two 32-bit fields */
union ibmveth_buf_desc {
    u64 desc;
    struct ibmveth_buf_desc_fields fields;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
/* One entry in the receive completion queue, written by firmware.
 * flags_off and length are big-endian on the wire (__be32). */
struct ibmveth_rx_q_entry {
	__be32 flags_off;	/* flag bits (top byte) + buffer offset (low 16 bits) */
#define IBMVETH_RXQ_TOGGLE		0x80000000	/* toggle bit marking entry validity epoch */
#define IBMVETH_RXQ_TOGGLE_SHIFT	31
#define IBMVETH_RXQ_VALID		0x40000000	/* entry contains a received frame */
#define IBMVETH_RXQ_LRG_PKT		0x04000000	/* large (aggregated) packet */
#define IBMVETH_RXQ_NO_CSUM		0x02000000	/* checksum not computed */
#define IBMVETH_RXQ_CSUM_GOOD		0x01000000	/* checksum verified by firmware */
#define IBMVETH_RXQ_OFF_MASK		0x0000FFFF	/* data offset within the buffer */

	__be32 length;	/* received frame length */
	/* correlator is only used by the OS, no need to byte swap */
	u64 correlator;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) #endif /* _IBMVETH_H */