^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /****************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Driver for Solarflare network controllers and boards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright 2008-2013 Solarflare Communications Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "net_driver.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include "nic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "farch_regs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "mcdi_pcol.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * Management-Controller-to-Driver Interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) **************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define MCDI_RPC_TIMEOUT (10 * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) /* A reboot/assertion causes the MCDI status word to be set after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * command word is set or a REBOOT event is sent. If we notice a reboot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * via these mechanisms then wait 250ms for the status word to be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define MCDI_STATUS_DELAY_US 100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define MCDI_STATUS_DELAY_COUNT 2500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define MCDI_STATUS_SLEEP_MS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define SEQ_MASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
/* Bookkeeping for one queued asynchronous MCDI request.  The request
 * payload (inlen bytes) and, on completion, the response (up to outlen
 * bytes) live in memory immediately following this structure (callers
 * use pointer arithmetic of the form "(async + 1)" to reach it).
 */
struct efx_mcdi_async_param {
	struct list_head list;		/* entry in mcdi->async_list */
	unsigned int cmd;		/* MCDI command code */
	size_t inlen;			/* length of request payload */
	size_t outlen;			/* capacity of response buffer */
	bool quiet;			/* presumably suppresses error logging
					 * on failure — confirm against the
					 * *_quiet RPC variants */
	efx_mcdi_async_completer *complete;	/* completion callback */
	unsigned long cookie;		/* opaque value passed to *complete */
	/* followed by request/response buffer */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) static void efx_mcdi_timeout_async(struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) bool *was_attached_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) static bool efx_mcdi_poll_once(struct efx_nic *efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) static void efx_mcdi_abandon(struct efx_nic *efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #ifdef CONFIG_SFC_MCDI_LOGGING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) static bool mcdi_logging_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) module_param(mcdi_logging_default, bool, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) MODULE_PARM_DESC(mcdi_logging_default,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) "Enable MCDI logging on newly-probed functions");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
/* Allocate and initialise MCDI state for @efx, recover from any pending
 * MC assertion, then register ("attach") the driver with the MC.
 *
 * Returns 0 on success or a negative errno.  On failure all partially
 * initialised state is freed and efx->mcdi is left NULL.
 */
int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;
	bool already_attached;
	int rc = -ENOMEM;	/* default errno for the allocation-failure paths */

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		goto fail;

	mcdi = efx_mcdi(efx);
	mcdi->efx = efx;
#ifdef CONFIG_SFC_MCDI_LOGGING
	/* consuming code assumes buffer is page-sized */
	mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!mcdi->logging_buffer)
		goto fail1;
	mcdi->logging_enabled = mcdi_logging_default;
#endif
	init_waitqueue_head(&mcdi->wq);
	init_waitqueue_head(&mcdi->proxy_rx_wq);
	spin_lock_init(&mcdi->iface_lock);
	/* Start idle and in polled mode; event-driven completion is
	 * enabled later by efx_mcdi_mode_event(), if at all.
	 */
	mcdi->state = MCDI_STATE_QUIESCENT;
	mcdi->mode = MCDI_MODE_POLL;
	spin_lock_init(&mcdi->async_lock);
	INIT_LIST_HEAD(&mcdi->async_list);
	timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);

	/* Consume any stale MC-reboot indication before first use */
	(void) efx_mcdi_poll_reboot(efx);
	mcdi->new_epoch = true;

	/* Recover from a failed assertion before probing */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		goto fail2;

	/* Let the MC (and BMC, if this is a LOM) know that the driver
	 * is loaded. We should do this before we reset the NIC.
	 */
	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unable to register driver with MCPU\n");
		goto fail2;
	}
	if (already_attached)
		/* Not a fatal error */
		netif_err(efx, probe, efx->net_dev,
			  "Host already registered with MCPU\n");

	/* If the MC reports this function as primary, this device is
	 * its own primary.
	 */
	if (efx->mcdi->fn_flags &
	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		efx->primary = efx;

	return 0;
fail2:
#ifdef CONFIG_SFC_MCDI_LOGGING
	free_page((unsigned long)mcdi->logging_buffer);
fail1:
#endif
	kfree(efx->mcdi);
	efx->mcdi = NULL;
fail:
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) void efx_mcdi_detach(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) if (!efx->mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /* Relinquish the device (back to the BMC, if this is a LOM) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) efx_mcdi_drv_attach(efx, false, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) void efx_mcdi_fini(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) if (!efx->mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #ifdef CONFIG_SFC_MCDI_LOGGING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) free_page((unsigned long)efx->mcdi->iface.logging_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) kfree(efx->mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
/* Build an MCDI v1 or v2 header for @cmd, optionally log the request,
 * and hand header + payload to the NIC-type transport.
 *
 * The interface must already be owned (state != QUIESCENT); callers
 * acquire it via the acquire_sync/acquire_async paths.
 */
static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
#ifdef CONFIG_SFC_MCDI_LOGGING
	char *buf = mcdi->logging_buffer; /* page-sized */
#endif
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	seqno = mcdi->seqno & SEQ_MASK;
	spin_unlock_bh(&mcdi->iface_lock);

	/* Ask the MC to complete via an event when in event mode */
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1: single-dword header carrying cmd and length */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2: v1 header with code MC_CMD_V2_EXTN and zero
		 * DATALEN, followed by an extension dword carrying the
		 * real command and length.
		 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

#ifdef CONFIG_SFC_MCDI_LOGGING
	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
		int bytes = 0;
		int i;
		/* Lengths should always be a whole number of dwords, so scream
		 * if they're not.
		 */
		WARN_ON_ONCE(hdr_len % 4);
		WARN_ON_ONCE(inlen % 4);

		/* We own the logging buffer, as only one MCDI can be in
		 * progress on a NIC at any one time. So no need for locking.
		 */
		for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x",
					   le32_to_cpu(hdr[i].u32[0]));

		for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x",
					   le32_to_cpu(inbuf[i].u32[0]));

		netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
	}
#endif

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	/* First request of the epoch has now been sent */
	mcdi->new_epoch = false;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static int efx_mcdi_errno(unsigned int mcdi_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) switch (mcdi_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) #define TRANSLATE_ERROR(name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) case MC_CMD_ERR_ ## name: \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) return -name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) TRANSLATE_ERROR(EPERM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) TRANSLATE_ERROR(ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) TRANSLATE_ERROR(EINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) TRANSLATE_ERROR(EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) TRANSLATE_ERROR(EACCES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) TRANSLATE_ERROR(EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) TRANSLATE_ERROR(EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) TRANSLATE_ERROR(EDEADLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) TRANSLATE_ERROR(ENOSYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) TRANSLATE_ERROR(ETIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) TRANSLATE_ERROR(EALREADY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) TRANSLATE_ERROR(ENOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) #undef TRANSLATE_ERROR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) case MC_CMD_ERR_ENOTSUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) case MC_CMD_ERR_ALLOC_FAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) case MC_CMD_ERR_MAC_EXIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
/* Parse the header of a completed MCDI response and record the outcome
 * in mcdi->resprc / resprc_raw / resp_hdr_len / resp_data_len.
 *
 * Must be called with mcdi->iface_lock held (see efx_mcdi_poll_once()),
 * since it reads mcdi->seqno and writes the resp* fields.
 */
static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
#ifdef CONFIG_SFC_MCDI_LOGGING
	char *buf = mcdi->logging_buffer; /* page-sized */
#endif
	efx_dword_t hdr;

	/* First dword is common to MCDI v1 and v2 */
	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		/* v1 response: length is in the first header dword */
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		/* v2 response: actual length is in the extension dword */
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

#ifdef CONFIG_SFC_MCDI_LOGGING
	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
		size_t hdr_len, data_len;
		int bytes = 0;
		int i;

		WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
		hdr_len = mcdi->resp_hdr_len / 4;
		/* MCDI_DECLARE_BUF ensures that underlying buffer is padded
		 * to dword size, and the MCDI buffer is always dword size
		 */
		data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);

		/* We own the logging buffer, as only one MCDI can be in
		 * progress on a NIC at any one time. So no need for locking.
		 */
		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x", le32_to_cpu(hdr.u32[0]));
		}

		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
			efx->type->mcdi_read_response(efx, &hdr,
					mcdi->resp_hdr_len + (i * 4), 4);
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x", le32_to_cpu(hdr.u32[0]));
		}

		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
	}
#endif

	mcdi->resprc_raw = 0;
	/* An error flag with no payload indicates an MC reboot; a sequence
	 * number mismatch means this response is not for our request.
	 * Both are reported as -EIO.
	 */
	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		/* Error payload: first dword of the body is the raw MCDI
		 * error code; translate it to a Linux errno.
		 */
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
		mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
	} else {
		mcdi->resprc = 0;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) static bool efx_mcdi_poll_once(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) if (!efx->type->mcdi_poll_response(efx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) spin_lock_bh(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) efx_mcdi_read_response_header(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) spin_unlock_bh(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
/* Busy-wait / sleep-wait for completion of the current MCDI request.
 * Returns 0 on completion (including MC reboot, which is recorded in
 * mcdi->resprc) or -ETIMEDOUT after MCDI_RPC_TIMEOUT.
 */
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		/* Report the reboot as the command's result; there is no
		 * response payload in this case.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = USER_TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		/* Sample the clock BEFORE polling, so that a completion
		 * that arrives while we were preempted is not misreported
		 * as a timeout.
		 */
		time = jiffies;

		if (efx_mcdi_poll_once(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) /* Test and clear MC-rebooted flag for this port/function; reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) * software state as necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) int efx_mcdi_poll_reboot(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (!efx->mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) return efx->type->mcdi_poll_reboot(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) return cmpxchg(&mcdi->state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) MCDI_STATE_QUIESCENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
/* Claim the interface for a synchronous request, sleeping on mcdi->wq
 * until we succeed.  Must be called from a context that may sleep.
 */
static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING_SYNC.
	 */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
		   MCDI_STATE_QUIESCENT);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
/* Wait (in event mode) for the current synchronous request to complete.
 * Returns 0 on completion or -ETIMEDOUT.
 */
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) /* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * requester. Return whether this was done. Does not take any locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (cmpxchg(&mcdi->state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) MCDI_STATE_RUNNING_SYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) wake_up(&mcdi->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (mcdi->mode == MCDI_MODE_EVENTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct efx_mcdi_async_param *async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) struct efx_nic *efx = mcdi->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) /* Process the asynchronous request queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) spin_lock_bh(&mcdi->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) async = list_first_entry_or_null(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) &mcdi->async_list, struct efx_mcdi_async_param, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (async) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) mcdi->state = MCDI_STATE_RUNNING_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) efx_mcdi_send_request(efx, async->cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) (const efx_dword_t *)(async + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) async->inlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) mod_timer(&mcdi->async_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) jiffies + MCDI_RPC_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) spin_unlock_bh(&mcdi->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) if (async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) mcdi->state = MCDI_STATE_QUIESCENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) wake_up(&mcdi->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) /* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) * asynchronous completion function, and release the interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) * Return whether this was done. Must be called in bh-disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) * context. Will take iface_lock and async_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) struct efx_nic *efx = mcdi->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) struct efx_mcdi_async_param *async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) size_t hdr_len, data_len, err_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) efx_dword_t *outbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) MCDI_DECLARE_BUF_ERR(errbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if (cmpxchg(&mcdi->state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) MCDI_STATE_RUNNING_ASYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) spin_lock(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) /* Ensure that if the completion event arrives later,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * the seqno check in efx_mcdi_ev_cpl() will fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) ++mcdi->seqno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) ++mcdi->credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) rc = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) hdr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) rc = mcdi->resprc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) hdr_len = mcdi->resp_hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) data_len = mcdi->resp_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) spin_unlock(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* Stop the timer. In case the timer function is running, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * must wait for it to return so that there is no possibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * of it aborting the next request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if (!timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) del_timer_sync(&mcdi->async_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) spin_lock(&mcdi->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) async = list_first_entry(&mcdi->async_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) struct efx_mcdi_async_param, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) list_del(&async->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) spin_unlock(&mcdi->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) outbuf = (efx_dword_t *)(async + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) efx->type->mcdi_read_response(efx, outbuf, hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) min(async->outlen, data_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (!timeout && rc && !async->quiet) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) err_len = min(sizeof(errbuf), data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) efx->type->mcdi_read_response(efx, errbuf, hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) sizeof(errbuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) err_len, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (async->complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) async->complete(efx, async->cookie, rc, outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) min(async->outlen, data_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) kfree(async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) efx_mcdi_release(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) unsigned int datalen, unsigned int mcdi_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) bool wake = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) spin_lock(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (mcdi->credits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) /* The request has been cancelled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) --mcdi->credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) "MC response mismatch tx seq 0x%x rx "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) "seq 0x%x\n", seqno, mcdi->seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (efx->type->mcdi_max_ver >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) /* MCDI v2 responses don't fit in an event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) efx_mcdi_read_response_header(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) mcdi->resprc = efx_mcdi_errno(mcdi_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) mcdi->resp_hdr_len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) mcdi->resp_data_len = datalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) wake = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) spin_unlock(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) if (wake) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (!efx_mcdi_complete_async(mcdi, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) (void) efx_mcdi_complete_sync(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) /* If the interface isn't RUNNING_ASYNC or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * RUNNING_SYNC then we've received a duplicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * completion after we've already transitioned back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * QUIESCENT. [A subsequent invocation would increment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) * seqno, so would have failed the seqno check].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) static void efx_mcdi_timeout_async(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) efx_mcdi_complete_async(mcdi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (efx->type->mcdi_max_ver < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) (efx->type->mcdi_max_ver < 2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) (efx->type->mcdi_max_ver < 2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) inlen > MCDI_CTL_SDU_LEN_MAX_V1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) size_t hdr_len, size_t data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) u32 *proxy_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) MCDI_DECLARE_BUF_ERR(testbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) const size_t buflen = sizeof(testbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) if (!proxy_handle || data_len < buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) *proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) size_t inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) efx_dword_t *outbuf, size_t outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) size_t *outlen_actual, bool quiet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) u32 *proxy_handle, int *raw_rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) MCDI_DECLARE_BUF_ERR(errbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (mcdi->mode == MCDI_MODE_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) rc = efx_mcdi_poll(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) rc = efx_mcdi_await_completion(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) "MC command 0x%x inlen %d mode %d timed out\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) cmd, (int)inlen, mcdi->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) "MCDI request was completed without an event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) efx_mcdi_abandon(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) /* Close the race with efx_mcdi_ev_cpl() executing just too late
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * and completing a request we've just cancelled, by ensuring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * that the seqno check therein fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) spin_lock_bh(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) ++mcdi->seqno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) ++mcdi->credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) spin_unlock_bh(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (proxy_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) *proxy_handle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (outlen_actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) *outlen_actual = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) size_t hdr_len, data_len, err_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) /* At the very least we need a memory barrier here to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * we pick up changes from efx_mcdi_ev_cpl(). Protect against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) * a spurious efx_mcdi_ev_cpl() running concurrently by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * acquiring the iface_lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) spin_lock_bh(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) rc = mcdi->resprc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (raw_rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) *raw_rc = mcdi->resprc_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) hdr_len = mcdi->resp_hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) data_len = mcdi->resp_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) err_len = min(sizeof(errbuf), data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) spin_unlock_bh(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) BUG_ON(rc > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) efx->type->mcdi_read_response(efx, outbuf, hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) min(outlen, data_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (outlen_actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) *outlen_actual = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (cmd == MC_CMD_REBOOT && rc == -EIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /* Don't reset if MC_CMD_REBOOT returns EIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) } else if (rc == -EIO || rc == -EINTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) cmd, -rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (efx->type->mcdi_reboot_detected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) efx->type->mcdi_reboot_detected(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) } else if (proxy_handle && (rc == -EPROTO) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) proxy_handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) mcdi->proxy_rx_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) mcdi->proxy_rx_handle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) mcdi->state = MCDI_STATE_PROXY_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) } else if (rc && !quiet) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (rc == -EIO || rc == -EINTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) msleep(MCDI_STATUS_SLEEP_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) efx_mcdi_poll_reboot(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) mcdi->new_epoch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (!proxy_handle || !*proxy_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) efx_mcdi_release(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) /* Interrupt the proxy wait. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) mcdi->proxy_rx_status = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) wake_up(&mcdi->proxy_rx_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) u32 handle, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) mcdi->proxy_rx_status = efx_mcdi_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /* Ensure the status is written before we update the handle, since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * latter is used to check if we've finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) mcdi->proxy_rx_handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) wake_up(&mcdi->proxy_rx_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* Wait for a proxy event, or timeout. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) rc = wait_event_timeout(mcdi->proxy_rx_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) mcdi->proxy_rx_handle != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) mcdi->proxy_rx_status == -EINTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) MCDI_RPC_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (rc <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) netif_dbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) "MCDI proxy timeout %d\n", handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) } else if (mcdi->proxy_rx_handle != handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) netif_warn(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) "MCDI proxy unexpected handle %d (expected %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) mcdi->proxy_rx_handle, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) return mcdi->proxy_rx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) const efx_dword_t *inbuf, size_t inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) efx_dword_t *outbuf, size_t outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) size_t *outlen_actual, bool quiet, int *raw_rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (inbuf && inlen && (inbuf == outbuf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* The input buffer can't be aliased with the output. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) outlen_actual, quiet, &proxy_handle, raw_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (proxy_handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* Handle proxy authorisation. This allows approval of MCDI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * operations to be delegated to the admin function, allowing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * fine control over (eg) multicast subscriptions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) netif_dbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) "MCDI waiting for proxy auth %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) proxy_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) netif_dbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) "MCDI proxy retry %d\n", proxy_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* We now retry the original request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) mcdi->state = MCDI_STATE_RUNNING_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) efx_mcdi_send_request(efx, cmd, inbuf, inlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) outbuf, outlen, outlen_actual,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) quiet, NULL, raw_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) "MC command 0x%x failed after proxy auth rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) cmd, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (rc == -EINTR || rc == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) efx_mcdi_release(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) const efx_dword_t *inbuf, size_t inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) efx_dword_t *outbuf, size_t outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) size_t *outlen_actual, bool quiet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) int raw_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) outbuf, outlen, outlen_actual, true, &raw_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) efx->type->is_vf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* If the EVB port isn't available within a VF this may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * mean the PF is still bringing the switch up. We should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * retry our request shortly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) unsigned int delay_us = 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) netif_dbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) "%s: NO_EVB_PORT; will retry request\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) usleep_range(delay_us, delay_us + 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) outbuf, outlen, outlen_actual,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) true, &raw_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (delay_us < 100000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) delay_us <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) } while ((rc == -EPROTO) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) time_before(jiffies, abort_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) efx_mcdi_display_error(efx, cmd, inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) outbuf, outlen, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * efx_mcdi_rpc - Issue an MCDI command and wait for completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * @efx: NIC through which to issue the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * @cmd: Command type number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * @inbuf: Command parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * @inlen: Length of command parameters, in bytes. Must be a multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * @outbuf: Response buffer. May be %NULL if @outlen is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * @outlen: Length of response buffer, in bytes. If the actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * response is longer than @outlen & ~3, it will be truncated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * to that length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * @outlen_actual: Pointer through which to return the actual response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * length. May be %NULL if this is not needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * This function may sleep and therefore must be called in an appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * Return: A negative error code, or zero if successful. The error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * code may come from the MCDI response or may indicate a failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * to communicate with the MC. In the former case, the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * will still be copied to @outbuf and *@outlen_actual will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * set accordingly. In the latter case, *@outlen_actual will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * set to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) const efx_dword_t *inbuf, size_t inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) efx_dword_t *outbuf, size_t outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) size_t *outlen_actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) outlen_actual, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* Normally, on receiving an error code in the MCDI response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * efx_mcdi_rpc will log an error message containing (among other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * things) the raw error code, by means of efx_mcdi_display_error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * This _quiet version suppresses that; if the caller wishes to log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * the error conditionally on the return code, it should call this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * function and is then responsible for calling efx_mcdi_display_error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * as needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) const efx_dword_t *inbuf, size_t inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) efx_dword_t *outbuf, size_t outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) size_t *outlen_actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) outlen_actual, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) const efx_dword_t *inbuf, size_t inlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) rc = efx_mcdi_check_supported(efx, cmd, inlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (efx->mc_bist_for_other_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return -ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (mcdi->mode == MCDI_MODE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return -ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) efx_mcdi_acquire_sync(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) efx_mcdi_send_request(efx, cmd, inbuf, inlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) const efx_dword_t *inbuf, size_t inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) size_t outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) efx_mcdi_async_completer *complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) unsigned long cookie, bool quiet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct efx_mcdi_async_param *async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) rc = efx_mcdi_check_supported(efx, cmd, inlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (efx->mc_bist_for_other_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return -ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) async->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) async->inlen = inlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) async->outlen = outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) async->quiet = quiet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) async->complete = complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) async->cookie = cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) memcpy(async + 1, inbuf, inlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) spin_lock_bh(&mcdi->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (mcdi->mode == MCDI_MODE_EVENTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) list_add_tail(&async->list, &mcdi->async_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /* If this is at the front of the queue, try to start it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (mcdi->async_list.next == &async->list &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) efx_mcdi_acquire_async(mcdi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) efx_mcdi_send_request(efx, cmd, inbuf, inlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) mod_timer(&mcdi->async_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) jiffies + MCDI_RPC_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) kfree(async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) rc = -ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) spin_unlock_bh(&mcdi->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * @efx: NIC through which to issue the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * @cmd: Command type number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * @inbuf: Command parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * @inlen: Length of command parameters, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * @outlen: Length to allocate for response buffer, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * @complete: Function to be called on completion or cancellation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * @cookie: Arbitrary value to be passed to @complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * This function does not sleep and therefore may be called in atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * context. It will fail if event queues are disabled or if MCDI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * event completions have been disabled due to an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * If it succeeds, the @complete function will be called exactly once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * in atomic context, when one of the following occurs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * (a) the completion event is received (in NAPI context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * (b) event queues are disabled (in the process that disables them)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * (c) the request times-out (in timer context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) const efx_dword_t *inbuf, size_t inlen, size_t outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) efx_mcdi_async_completer *complete, unsigned long cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) cookie, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) const efx_dword_t *inbuf, size_t inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) size_t outlen, efx_mcdi_async_completer *complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) unsigned long cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) cookie, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) efx_dword_t *outbuf, size_t outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) size_t *outlen_actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) outlen_actual, false, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) efx_dword_t *outbuf, size_t outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) size_t *outlen_actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) outlen_actual, true, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) size_t inlen, efx_dword_t *outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) size_t outlen, int rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int code = 0, err_arg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) code = MCDI_DWORD(outbuf, ERR_CODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) err_arg = MCDI_DWORD(outbuf, ERR_ARG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) cmd, inlen, rc, code, err_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* Switch to polled MCDI completions. This can be called in various
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * error conditions with various locks held, so it must be lockless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * Caller is responsible for flushing asynchronous requests later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) void efx_mcdi_mode_poll(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct efx_mcdi_iface *mcdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (!efx->mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* If already in polling mode, nothing to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * If in fail-fast state, don't switch to polled completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * FLR recovery will do that later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /* We can switch from event completion to polled completion, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * mcdi requests are always completed in shared memory. We do this by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * switching the mode to POLL'd then completing the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * efx_mcdi_await_completion() will then call efx_mcdi_poll().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * which efx_mcdi_complete_sync() provides for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) mcdi->mode = MCDI_MODE_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) efx_mcdi_complete_sync(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* Flush any running or queued asynchronous requests, after event processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * is stopped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) void efx_mcdi_flush_async(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct efx_mcdi_async_param *async, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct efx_mcdi_iface *mcdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!efx->mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /* We must be in poll or fail mode so no more requests can be queued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) del_timer_sync(&mcdi->async_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /* If a request is still running, make sure we give the MC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * time to complete it so that the response won't overwrite our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * next request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) efx_mcdi_poll(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) mcdi->state = MCDI_STATE_QUIESCENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* Nothing else will access the async list now, so it is safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * to walk it without holding async_lock. If we hold it while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * calling a completer then lockdep may warn that we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * acquired locks in the wrong order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (async->complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) list_del(&async->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) kfree(async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) void efx_mcdi_mode_event(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct efx_mcdi_iface *mcdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (!efx->mcdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /* If already in event completion mode, nothing to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * If in fail-fast state, don't switch to event completion. FLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * recovery will do that later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /* We can't switch from polled to event completion in the middle of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * request, because the completion method is specified in the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * So acquire the interface to serialise the requestors. We don't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * to acquire the iface_lock to change the mode here, but we do need a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * write memory barrier ensure that efx_mcdi_rpc() sees it, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * efx_mcdi_acquire() provides.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) efx_mcdi_acquire_sync(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) mcdi->mode = MCDI_MODE_EVENTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) efx_mcdi_release(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* If there is an outstanding MCDI request, it has been terminated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * either by a BADASSERT or REBOOT event. If the mcdi interface is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * in polled mode, then do nothing because the MC reboot handler will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * set the header correctly. However, if the mcdi interface is waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * for a CMDDONE event it won't receive it [and since all MCDI events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * are sent to the same queue, we can't be racing with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * efx_mcdi_ev_cpl()]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * If there is an outstanding asynchronous request, we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * complete it now (efx_mcdi_complete() would deadlock). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * reset process will take care of this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * There's a race here with efx_mcdi_send_request(), because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * we might receive a REBOOT event *before* the request has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * been copied out. In polled mode (during startup) this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * irrelevant, because efx_mcdi_complete_sync() is ignored. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * event mode, this condition is just an edge-case of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * receiving a REBOOT event after posting the MCDI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * request. Did the mc reboot before or after the copyout? The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * best we can do always is just return failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * If there is an outstanding proxy response expected it is not going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * to arrive. We should thus abort it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) spin_lock(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) efx_mcdi_proxy_abort(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (efx_mcdi_complete_sync(mcdi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (mcdi->mode == MCDI_MODE_EVENTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) mcdi->resprc = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) mcdi->resp_hdr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) mcdi->resp_data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ++mcdi->credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /* Consume the status word since efx_mcdi_rpc_finish() won't */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) rc = efx_mcdi_poll_reboot(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) udelay(MCDI_STATUS_DELAY_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* On EF10, a CODE_MC_REBOOT event can be received without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * reboot detection in efx_mcdi_poll_reboot() being triggered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * If zero was returned from the final call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * MC has definitely rebooted so prepare for the reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (!rc && efx->type->mcdi_reboot_detected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) efx->type->mcdi_reboot_detected(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) mcdi->new_epoch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* Nobody was waiting for an MCDI request, so trigger a reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) spin_unlock(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* The MC is going down in to BIST mode. set the BIST flag to block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * new MCDI, cancel any outstanding MCDI and and schedule a BIST-type reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * (which doesn't actually execute a reset, it waits for the controlling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * function to reset it).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static void efx_mcdi_ev_bist(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) spin_lock(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) efx->mc_bist_for_other_fn = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) efx_mcdi_proxy_abort(mcdi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (efx_mcdi_complete_sync(mcdi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (mcdi->mode == MCDI_MODE_EVENTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) mcdi->resprc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) mcdi->resp_hdr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) mcdi->resp_data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) ++mcdi->credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) mcdi->new_epoch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) spin_unlock(&mcdi->iface_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * to recover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static void efx_mcdi_abandon(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return; /* it had already been done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static void efx_handle_drain_event(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (atomic_dec_and_test(&efx->active_queues))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) wake_up(&efx->flush_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) WARN_ON(atomic_read(&efx->active_queues) < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) void efx_mcdi_process_event(struct efx_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) switch (code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) case MCDI_EVENT_CODE_BADSSERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) "MC watchdog or assertion failure at 0x%x\n", data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) efx_mcdi_ev_death(efx, -EINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) case MCDI_EVENT_CODE_PMNOTICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) case MCDI_EVENT_CODE_CMDDONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) efx_mcdi_ev_cpl(efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) case MCDI_EVENT_CODE_LINKCHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) efx_mcdi_process_link_change(efx, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) case MCDI_EVENT_CODE_SENSOREVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) efx_sensor_event(efx, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) case MCDI_EVENT_CODE_SCHEDERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) netif_dbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) "MC Scheduler alert (0x%x)\n", data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) case MCDI_EVENT_CODE_REBOOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) case MCDI_EVENT_CODE_MC_REBOOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) efx_mcdi_ev_death(efx, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) case MCDI_EVENT_CODE_MC_BIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) efx_mcdi_ev_bist(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) case MCDI_EVENT_CODE_MAC_STATS_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /* MAC stats are gather lazily. We can ignore this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) case MCDI_EVENT_CODE_FLR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (efx->type->sriov_flr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) efx->type->sriov_flr(efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) MCDI_EVENT_FIELD(*event, FLR_VF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) case MCDI_EVENT_CODE_PTP_RX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) case MCDI_EVENT_CODE_PTP_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) case MCDI_EVENT_CODE_PTP_PPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) efx_ptp_event(efx, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) case MCDI_EVENT_CODE_PTP_TIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) efx_time_sync_event(channel, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) case MCDI_EVENT_CODE_TX_FLUSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) case MCDI_EVENT_CODE_RX_FLUSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /* Two flush events will be sent: one to the same event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * queue as completions, and one to event queue 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * flag will be set, and we should ignore the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * because we want to wait for all completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) efx_handle_drain_event(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) case MCDI_EVENT_CODE_TX_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) case MCDI_EVENT_CODE_RX_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) "%s DMA error (event: "EFX_QWORD_FMT")\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) case MCDI_EVENT_CODE_PROXY_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) efx_mcdi_ev_proxy_response(efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) "Unknown MCDI event " EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * Specific request functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) **************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) size_t outlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) const __le16 *ver_words;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) outbuf, sizeof(outbuf), &outlength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) offset = scnprintf(buf, len, "%u.%u.%u.%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) le16_to_cpu(ver_words[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) le16_to_cpu(ver_words[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) le16_to_cpu(ver_words[2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) le16_to_cpu(ver_words[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (efx->type->print_additional_fwver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) offset += efx->type->print_additional_fwver(efx, buf + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) len - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /* It's theoretically possible for the string to exceed 31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * characters, though in practice the first three version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * components are short enough that this doesn't happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (WARN_ON(offset >= len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) buf[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) buf[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) bool *was_attached)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) driver_operating ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) * specified will fail with EPERM, and we have to tell the MC we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * care what firmware we get.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (rc == -EPERM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) netif_dbg(efx, probe, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) MC_CMD_FW_DONT_CARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) sizeof(inbuf), outbuf, sizeof(outbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) outbuf, outlen, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (driver_operating) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) efx->mcdi->fn_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) MCDI_DWORD(outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /* Synthesise flags for Siena */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) efx->mcdi->fn_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) (efx_port_num(efx) == 0) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /* We currently assume we have control of the external link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * and are completely trusted by firmware. Abort probing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * if that's not true for this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (was_attached != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) u16 *fw_subtype_list, u32 *capabilities)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) size_t outlen, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) int port_num = efx_port_num(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) /* we need __aligned(2) for ether_addr_copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (mac_address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) ether_addr_copy(mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) port_num ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (fw_subtype_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) for (i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) i < MCDI_VAR_ARRAY_LEN(outlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) fw_subtype_list[i] = MCDI_ARRAY_WORD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) fw_subtype_list[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (capabilities) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (port_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) *capabilities = MCDI_DWORD(outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) *capabilities = MCDI_DWORD(outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) __func__, rc, (int)outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) u32 dest = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (uart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (evq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /* This function finds types using the new NVRAM_PARTITIONS mcdi. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) u32 *nvram_types)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (!outbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) *number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) *number * sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) kfree(outbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) size_t *size_out, size_t *erase_size_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) bool *protected_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) outbuf, sizeof(outbuf), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) case MC_CMD_NVRAM_TEST_PASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) case MC_CMD_NVRAM_TEST_NOTSUPP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) /* This function tests nvram partitions using the new mcdi partition lookup scheme */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) int efx_new_mcdi_nvram_test_all(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) unsigned int number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (!nvram_types)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) /* Require at least one check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) rc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) for (i = 0; i < number; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) rc = efx_mcdi_nvram_test(efx, nvram_types[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) kfree(nvram_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int efx_mcdi_nvram_test_all(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) u32 nvram_types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) unsigned int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) rc = efx_mcdi_nvram_types(efx, &nvram_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) goto fail1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) while (nvram_types != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (nvram_types & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) rc = efx_mcdi_nvram_test(efx, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) goto fail2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) type++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) nvram_types >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) fail2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) __func__, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) fail1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) /* Returns 1 if an assertion was read, 0 if no assertion had fired,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * negative on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		/* CLEAR=1 so that reading the assertion also clears it */
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
					outbuf, sizeof(outbuf), &outlen);
		/* -EPERM: we may not query asserts (presumably an
		 * unprivileged function - TODO confirm); treat as
		 * "no assertion" rather than an error.
		 */
		if (rc == -EPERM)
			return 0;
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc) {
		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
				       outlen, rc);
		return rc;
	}
	/* A short response cannot contain the fields read below */
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	/* 1 = an assertion was read (and cleared) */
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static int efx_mcdi_exit_assertion(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /* If the MC is running debug firmware, it might now be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * waiting for a debugger to attach, but we just want it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * reboot. We set a flag that makes the command a no-op if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * has already done so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * The MCDI will thus return either 0 or -EIO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (rc == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) NULL, 0, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
/* Read out and clear any recorded MC assertion, then reboot the MC out
 * of the assertion handler if one had actually fired.
 */
int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int asserted = efx_mcdi_read_assertion(efx);

	/* 0: nothing fired; <0: read failed - propagate either as-is */
	if (asserted <= 0)
		return asserted;

	return efx_mcdi_exit_assertion(efx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) static int efx_mcdi_reset_func(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) static int efx_mcdi_reset_mc(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) /* White is black, and up is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (rc == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
/* Map a generic reset reason onto the reset type to perform.  For
 * MCDI-capable NICs every reason maps to a recover-or-full reset;
 * the original reason is intentionally ignored.
 */
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
/* Perform a reset of the requested type.  Handles any pending MC
 * assertion first, except when MCDI itself is unusable.
 */
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* If MCDI is down, we can't handle_assertion */
	if (method == RESET_TYPE_MCDI_TIMEOUT) {
		/* Fall back to a PCI function-level reset instead of
		 * talking to the unresponsive MC.
		 */
		rc = pci_reset_function(efx->pci_dev);
		if (rc)
			return rc;
		/* Re-enable polled MCDI completion */
		if (efx->mcdi) {
			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
			mcdi->mode = MCDI_MODE_POLL;
		}
		return 0;
	}

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	/* DATAPATH: the assertion handling above is all that's needed;
	 * WORLD: reboot the whole MC; otherwise reset just this function.
	 */
	if (method == RESET_TYPE_DATAPATH)
		return 0;
	else if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_func(efx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) const u8 *mac, int *id_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) MC_CMD_FILTER_MODE_SIMPLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) *id_out = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) *id_out = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
/* Ask the MC to flush every RX queue that has a flush pending.
 * Clears each queue's flush_pending flag and decrements the global
 * rxq_flush_pending count as the queue IDs are gathered.
 */
int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	/* One QID slot per channel is the most we can ever need */
	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	/* The request length covers only the entries actually filled in */
	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) unsigned int *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (!flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) *flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) unsigned int *enabled_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (impl_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (enabled_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) /* Older firmware lacks GET_WORKAROUNDS and this isn't especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * terrifying. The call site will have to deal with it though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) "%s: failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) #ifdef CONFIG_SFC_MTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) #define EFX_MCDI_NVRAM_LEN_MAX 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) loff_t offset, u8 *buffer, size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) MCDI_DECLARE_BUF(outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) loff_t offset, const u8 *buffer, size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) MCDI_DECLARE_BUF(inbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) loff_t offset, size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) int rc, rc2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) /* Always set this flag. Old firmware ignores it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) netif_err(efx, drv, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) "NVRAM update failed verification with code 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) rc2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) switch (rc2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) rc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) netif_err(efx, drv, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) "Unknown response to NVRAM_UPDATE_FINISH\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) size_t len, size_t *retlen, u8 *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) struct efx_nic *efx = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) loff_t offset = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) loff_t end = min_t(loff_t, start + len, mtd->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) size_t chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) while (offset < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) buffer, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) offset += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) buffer += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) *retlen = offset - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) struct efx_nic *efx = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) loff_t end = min_t(loff_t, start + len, mtd->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) size_t chunk = part->common.mtd.erasesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (!part->updating) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) part->updating = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) /* The MCDI interface can in fact do multiple erase blocks at once;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) * but erasing may be slow, so we make multiple calls here to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) * tripping the MCDI RPC timeout. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) while (offset < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) offset += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) size_t len, size_t *retlen, const u8 *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) struct efx_nic *efx = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) loff_t offset = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) loff_t end = min_t(loff_t, start + len, mtd->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) size_t chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) if (!part->updating) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) part->updating = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) while (offset < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) buffer, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) offset += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) buffer += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) *retlen = offset - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) int efx_mcdi_mtd_sync(struct mtd_info *mtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) struct efx_nic *efx = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (part->updating) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) part->updating = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct efx_mcdi_mtd_partition *mcdi_part =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) container_of(part, struct efx_mcdi_mtd_partition, common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) struct efx_nic *efx = part->mtd.priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) snprintf(part->name, sizeof(part->name), "%s %s:%02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) efx->name, part->type_name, mcdi_part->fw_subtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) #endif /* CONFIG_SFC_MTD */