/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "filter.h"

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);

/* TX */
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev);
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
			       ef100_enqueue_skb, __efx_enqueue_skb,
			       tx_queue, skb);
}
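/* INDIRECT_CALL_2(ptr, cb1, cb2, args...) from indirect_call_wrapper.h
 * compares @ptr against the two expected callees and makes a direct call
 * on a match, falling back to a plain indirect call otherwise.  This
 * avoids a retpoline-mitigated indirect branch on the TX hot path.
 * Roughly (an illustrative sketch, not the exact macro expansion):
 *
 *	if (ptr == cb1)
 *		return cb1(args);
 *	if (ptr == cb2)
 *		return cb2(args);
 *	return ptr(args);
 */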
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data);
extern unsigned int efx_piobuf_size;

/* RX */
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
	if (channel->rx_pkt_n_frags)
		INDIRECT_CALL_2(channel->efx->type->rx_packet,
				__ef100_rx_packet, __efx_rx_packet,
				channel);
}
static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
	if (efx->type->rx_buf_hash_valid)
		return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
				       ef100_rx_buf_hash_valid,
				       prefix);
	return true;
}

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS	100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EFX_RXQ_MIN_ENT		128U
#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))

/* All EF10 architecture NICs steal one bit of the DMAQ size for various
 * other purposes when counting TxQ entries, so we halve the queue size.
 */
#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)

static inline bool efx_rss_enabled(struct efx_nic *efx)
{
	return efx->rss_spread > 1;
}

/* Filters */

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows:
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 *    is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
 *    support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
					   struct efx_filter_spec *spec,
					   bool replace_equal)
{
	return efx->type->filter_insert(efx, spec, replace_equal);
}
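
/* Usage sketch (illustrative only, not a call site from the driver): steer
 * one TCP/IPv4 flow to RX queue 0 with a manual-priority filter, then
 * remove it by the returned ID.  efx_filter_init_rx() and
 * efx_filter_set_ipv4_full() are the spec-building helpers from filter.h;
 * the function name and the addresses/ports below are hypothetical.
 */
#if 0
static int example_steer_flow(struct efx_nic *efx)
{
	struct efx_filter_spec spec;
	s32 filter_id;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	efx_filter_set_ipv4_full(&spec, IPPROTO_TCP,
				 htonl(0xc0a80001), htons(80),	/* local */
				 htonl(0xc0a80002), htons(40000)); /* remote */

	filter_id = efx_filter_insert_filter(efx, &spec, false);
	if (filter_id < 0)
		return filter_id;	/* e.g. -EPERM or -EEXIST, as above */

	/* ... flow runs ... */

	return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
					 filter_id);
}
#endif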

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to efx_filter_insert_filter()
 * @filter_id: ID of filter, as returned by efx_filter_insert_filter()
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
					    enum efx_filter_priority priority,
					    u32 filter_id)
{
	return efx->type->filter_remove_safe(efx, priority, filter_id);
}

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to efx_filter_insert_filter()
 * @filter_id: ID of filter, as returned by efx_filter_insert_filter()
 * @spec: Buffer in which to store the filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
			   enum efx_filter_priority priority,
			   u32 filter_id, struct efx_filter_spec *spec)
{
	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
					   enum efx_filter_priority priority)
{
	return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
					enum efx_filter_priority priority,
					u32 *buf, u32 size)
{
	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
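
/* Usage sketch (illustrative only): enumerate the IDs of the RX filters
 * installed at a given priority and read each spec back.  The count is
 * inherently racy against concurrent insert/remove, so a short read or a
 * vanished ID is not treated as fatal.  The function name is hypothetical;
 * efx_filter_get_rx_ids() returns the number of IDs written to @buf or a
 * negative error code.
 */
#if 0
static int example_dump_rx_filters(struct efx_nic *efx,
				   enum efx_filter_priority priority)
{
	struct efx_filter_spec spec;
	u32 count = efx_filter_count_rx_used(efx, priority);
	s32 n_ids, i;
	u32 *ids;
	int rc = 0;

	ids = kcalloc(count, sizeof(*ids), GFP_KERNEL);
	if (!ids)
		return -ENOMEM;

	n_ids = efx_filter_get_rx_ids(efx, priority, ids, count);
	if (n_ids < 0) {
		rc = n_ids;
	} else {
		for (i = 0; i < n_ids; i++)
			if (!efx_filter_get_filter_safe(efx, priority,
							ids[i], &spec))
				netif_dbg(efx, drv, efx->net_dev,
					  "filter %u at priority %d\n",
					  ids[i], priority);
	}

	kfree(ids);
	return rc;
}
#endif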

/* RSS contexts */
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
	return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
		size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
	return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
/* Number of VIs each VF occupies: 2**vi_scale */
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
	return 1 << efx->vi_scale;
}
#endif

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
	struct net_device *dev = efx->net_dev;

	/* Lock/freeze all TX queues so that we can be sure the
	 * TX scheduler is stopped when we're done and before
	 * netif_device_present() becomes false.
	 */
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock_bh(dev);
}

static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
	if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
		netif_device_attach(efx->net_dev);
}
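
/* Typical pairing (illustrative sketch, not a verbatim call site): quiesce
 * the data path before reconfiguring the hardware, then re-attach only if
 * the device was not disabled and no reset was queued in the meantime:
 *
 *	efx_device_detach_sync(efx);
 *	rc = reconfigure_hardware(efx);		// hypothetical helper
 *	efx_device_attach_if_not_resetting(efx);
 */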

/* Assert that @sem is held for writing.  down_read_trylock() can only
 * succeed while no writer holds the semaphore, so success here means the
 * caller broke the locking contract, which we WARN about.
 */
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
	if (WARN_ON(down_read_trylock(sem))) {
		up_read(sem);
		return false;
	}
	return true;
}

int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush);

#endif /* EFX_EFX_H */