// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Gigabit Ethernet adapters based on the Session Layer
 * Interface (SLIC) technology by Alacritech. The driver does not
 * support the hardware acceleration features provided by these cards.
 *
 * Copyright (C) 2016 Lino Sanfilippo <LinoSanfilippo@gmx.de>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/u64_stats_sync.h>

#include "slic.h"

#define DRV_NAME "slicoss"

static const struct pci_device_id slic_id_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
		     PCI_DEVICE_ID_ALACRITECH_MOJAVE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
		     PCI_DEVICE_ID_ALACRITECH_OASIS) },
	{ 0 }
};

static const char slic_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"rx_bytes",
	"rx_multicasts",
	"rx_errors",
	"rx_buff_miss",
	"rx_tp_csum",
	"rx_tp_oflow",
	"rx_tp_hlen",
	"rx_ip_csum",
	"rx_ip_len",
	"rx_ip_hdr_len",
	"rx_early",
	"rx_buff_oflow",
	"rx_lcode",
	"rx_drbl",
	"rx_crc",
	"rx_oflow_802",
	"rx_uflow_802",
	"tx_packets",
	"tx_bytes",
	"tx_carrier",
	"tx_dropped",
	"irq_errs",
};

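/* Ring index helpers: all queue lengths used by this driver are powers of
 * two, so advancing an index is a simple mask operation. One slot is
 * always kept empty (note the "- 1" in the free count) to distinguish a
 * full ring from an empty one.
 */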
static inline int slic_next_queue_idx(unsigned int idx, unsigned int qlen)
{
	return (idx + 1) & (qlen - 1);
}

static inline int slic_get_free_queue_descs(unsigned int put_idx,
					    unsigned int done_idx,
					    unsigned int qlen)
{
	if (put_idx >= done_idx)
		return (qlen - (put_idx - done_idx) - 1);
	return (done_idx - put_idx - 1);
}

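/* Fetch the index of the next finished completion from the status queue.
 * The status descriptors live in SLIC_NUM_STAT_DESC_ARRAYS coherent
 * arrays; whenever the done index wraps around, the just-drained array is
 * handed back to the hardware via SLIC_REG_RBAR and the next array
 * becomes the active one.
 */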
static unsigned int slic_next_compl_idx(struct slic_device *sdev)
{
	struct slic_stat_queue *stq = &sdev->stq;
	unsigned int active = stq->active_array;
	struct slic_stat_desc *descs;
	struct slic_stat_desc *stat;
	unsigned int idx;

	descs = stq->descs[active];
	stat = &descs[stq->done_idx];

	if (!stat->status)
		return SLIC_INVALID_STAT_DESC_IDX;

	idx = (le32_to_cpu(stat->hnd) & 0xffff) - 1;
	/* reset desc */
	stat->hnd = 0;
	stat->status = 0;

	stq->done_idx = slic_next_queue_idx(stq->done_idx, stq->len);
	/* check for wraparound */
	if (!stq->done_idx) {
		dma_addr_t paddr = stq->paddr[active];

		slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
			   stq->len);
		/* make sure new status descriptors are immediately available */
		slic_flush_write(sdev);
		active++;
		active &= (SLIC_NUM_STAT_DESC_ARRAYS - 1);
		stq->active_array = active;
	}
	return idx;
}

static unsigned int slic_get_free_tx_descs(struct slic_tx_queue *txq)
{
	/* ensure tail idx is updated */
	smp_mb();
	return slic_get_free_queue_descs(txq->put_idx, txq->done_idx, txq->len);
}

static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq)
{
	return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);
}

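/* UPR handling: only one request may be outstanding on the card at a
 * time, so new UPRs are appended to a list and a request is written to
 * the hardware only if none is pending; the next queued request is
 * started when its predecessor is dequeued (see slic_dequeue_upr).
 */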
static void slic_clear_upr_list(struct slic_upr_list *upr_list)
{
	struct slic_upr *upr;
	struct slic_upr *tmp;

	spin_lock_bh(&upr_list->lock);
	list_for_each_entry_safe(upr, tmp, &upr_list->list, list) {
		list_del(&upr->list);
		kfree(upr);
	}
	upr_list->pending = false;
	spin_unlock_bh(&upr_list->lock);
}

static void slic_start_upr(struct slic_device *sdev, struct slic_upr *upr)
{
	u32 reg;

	reg = (upr->type == SLIC_UPR_CONFIG) ? SLIC_REG_RCONFIG :
					       SLIC_REG_LSTAT;
	slic_write(sdev, reg, lower_32_bits(upr->paddr));
	slic_flush_write(sdev);
}

static void slic_queue_upr(struct slic_device *sdev, struct slic_upr *upr)
{
	struct slic_upr_list *upr_list = &sdev->upr_list;
	bool pending;

	spin_lock_bh(&upr_list->lock);
	pending = upr_list->pending;
	INIT_LIST_HEAD(&upr->list);
	list_add_tail(&upr->list, &upr_list->list);
	upr_list->pending = true;
	spin_unlock_bh(&upr_list->lock);

	if (!pending)
		slic_start_upr(sdev, upr);
}

static struct slic_upr *slic_dequeue_upr(struct slic_device *sdev)
{
	struct slic_upr_list *upr_list = &sdev->upr_list;
	struct slic_upr *next_upr = NULL;
	struct slic_upr *upr = NULL;

	spin_lock_bh(&upr_list->lock);
	if (!list_empty(&upr_list->list)) {
		upr = list_first_entry(&upr_list->list, struct slic_upr, list);
		list_del(&upr->list);

		if (list_empty(&upr_list->list))
			upr_list->pending = false;
		else
			next_upr = list_first_entry(&upr_list->list,
						    struct slic_upr, list);
	}
	spin_unlock_bh(&upr_list->lock);
	/* trigger processing of the next upr in list */
	if (next_upr)
		slic_start_upr(sdev, next_upr);

	return upr;
}

static int slic_new_upr(struct slic_device *sdev, unsigned int type,
			dma_addr_t paddr)
{
	struct slic_upr *upr;

	upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
	if (!upr)
		return -ENOMEM;
	upr->type = type;
	upr->paddr = paddr;

	slic_queue_upr(sdev, upr);

	return 0;
}

static void slic_set_mcast_bit(u64 *mcmask, unsigned char const *addr)
{
	u64 mask = *mcmask;
	u8 crc;
	/* Get the CRC polynomial for the mac address: we use bits 1-8 (lsb),
	 * bitwise reversed, msb (= lsb bit 0 before bitrev) is automatically
	 * discarded.
	 */
	crc = ether_crc(ETH_ALEN, addr) >> 23;
	/* we only have space on the SLIC for 64 entries */
	crc &= 0x3F;
	mask |= (u64)1 << crc;
	*mcmask = mask;
}

/* must be called with link_lock held */
static void slic_configure_rcv(struct slic_device *sdev)
{
	u32 val;

	val = SLIC_GRCR_RESET | SLIC_GRCR_ADDRAEN | SLIC_GRCR_RCVEN |
	      SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT | SLIC_GRCR_RCVBAD;

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GRCR_CTLEN;

	if (sdev->promisc)
		val |= SLIC_GRCR_RCVALL;

	slic_write(sdev, SLIC_REG_WRCFG, val);
}

/* must be called with link_lock held */
static void slic_configure_xmt(struct slic_device *sdev)
{
	u32 val;

	val = SLIC_GXCR_RESET | SLIC_GXCR_XMTEN;

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GXCR_PAUSEEN;

	slic_write(sdev, SLIC_REG_WXCFG, val);
}

/* must be called with link_lock held */
static void slic_configure_mac(struct slic_device *sdev)
{
	u32 val;

	if (sdev->speed == SPEED_1000) {
		val = SLIC_GMCR_GAPBB_1000 << SLIC_GMCR_GAPBB_SHIFT |
		      SLIC_GMCR_GAPR1_1000 << SLIC_GMCR_GAPR1_SHIFT |
		      SLIC_GMCR_GAPR2_1000 << SLIC_GMCR_GAPR2_SHIFT |
		      SLIC_GMCR_GBIT; /* enable GMII */
	} else {
		val = SLIC_GMCR_GAPBB_100 << SLIC_GMCR_GAPBB_SHIFT |
		      SLIC_GMCR_GAPR1_100 << SLIC_GMCR_GAPR1_SHIFT |
		      SLIC_GMCR_GAPR2_100 << SLIC_GMCR_GAPR2_SHIFT;
	}

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GMCR_FULLD;

	slic_write(sdev, SLIC_REG_WMCFG, val);
}

static void slic_configure_link_locked(struct slic_device *sdev, int speed,
				       unsigned int duplex)
{
	struct net_device *dev = sdev->netdev;

	if (sdev->speed == speed && sdev->duplex == duplex)
		return;

	sdev->speed = speed;
	sdev->duplex = duplex;

	if (sdev->speed == SPEED_UNKNOWN) {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	} else {
		/* (re)configure link settings */
		slic_configure_mac(sdev);
		slic_configure_xmt(sdev);
		slic_configure_rcv(sdev);
		slic_flush_write(sdev);

		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	}
}

static void slic_configure_link(struct slic_device *sdev, int speed,
				unsigned int duplex)
{
	spin_lock_bh(&sdev->link_lock);
	slic_configure_link_locked(sdev, speed, duplex);
	spin_unlock_bh(&sdev->link_lock);
}

static void slic_set_rx_mode(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct netdev_hw_addr *hwaddr;
	bool set_promisc;
	u64 mcmask;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Turn on all multicast addresses. We have to do this for
		 * promiscuous mode as well as ALLMCAST mode (it saves the
		 * microcode from having to keep state about the MAC
		 * configuration).
		 */
		mcmask = ~(u64)0;
	} else {
		mcmask = 0;

		netdev_for_each_mc_addr(hwaddr, dev) {
			slic_set_mcast_bit(&mcmask, hwaddr->addr);
		}
	}

	slic_write(sdev, SLIC_REG_MCASTLOW, lower_32_bits(mcmask));
	slic_write(sdev, SLIC_REG_MCASTHIGH, upper_32_bits(mcmask));

	set_promisc = !!(dev->flags & IFF_PROMISC);

	spin_lock_bh(&sdev->link_lock);
	if (sdev->promisc != set_promisc) {
		sdev->promisc = set_promisc;
		slic_configure_rcv(sdev);
	}
	spin_unlock_bh(&sdev->link_lock);
}

static void slic_xmit_complete(struct slic_device *sdev)
{
	struct slic_tx_queue *txq = &sdev->txq;
	struct net_device *dev = sdev->netdev;
	struct slic_tx_buffer *buff;
	unsigned int frames = 0;
	unsigned int bytes = 0;
	unsigned int idx;

	/* Limit processing to SLIC_MAX_TX_COMPLETIONS frames, to prevent new
	 * completions that arrive during processing from keeping the loop
	 * running endlessly.
	 */
	do {
		idx = slic_next_compl_idx(sdev);
		if (idx == SLIC_INVALID_STAT_DESC_IDX)
			break;

		txq->done_idx = idx;
		buff = &txq->txbuffs[idx];

		if (unlikely(!buff->skb)) {
			netdev_warn(dev,
				    "no skb found for desc idx %i\n", idx);
			continue;
		}
		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len), DMA_TO_DEVICE);

		bytes += buff->skb->len;
		frames++;

		dev_kfree_skb_any(buff->skb);
		buff->skb = NULL;
	} while (frames < SLIC_MAX_TX_COMPLETIONS);
	/* make sure xmit sees the new value for done_idx */
	smp_wmb();

	u64_stats_update_begin(&sdev->stats.syncp);
	sdev->stats.tx_bytes += bytes;
	sdev->stats.tx_packets += frames;
	u64_stats_update_end(&sdev->stats.syncp);

	netif_tx_lock(dev);
	if (netif_queue_stopped(dev) &&
	    (slic_get_free_tx_descs(txq) >= SLIC_MIN_TX_WAKEUP_DESCS))
		netif_wake_queue(dev);
	netif_tx_unlock(dev);
}

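/* Refill the RX ring with freshly mapped skbs. Each buffer starts with a
 * slic_rx_desc header that the hardware fills in, followed by the frame
 * data; the buffer address written to SLIC_REG_HBAR must be aligned to
 * SLIC_RX_BUFF_ALIGN, which is achieved with skb_reserve() on buffers
 * whose DMA address happens to be misaligned.
 */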
static void slic_refill_rx_queue(struct slic_device *sdev, gfp_t gfp)
{
	const unsigned int ALIGN_MASK = SLIC_RX_BUFF_ALIGN - 1;
	unsigned int maplen = SLIC_RX_BUFF_SIZE;
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct net_device *dev = sdev->netdev;
	struct slic_rx_buffer *buff;
	struct slic_rx_desc *desc;
	unsigned int misalign;
	unsigned int offset;
	struct sk_buff *skb;
	dma_addr_t paddr;

	while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) {
		skb = alloc_skb(maplen + ALIGN_MASK, gfp);
		if (!skb)
			break;

		paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
			netdev_err(dev, "mapping rx packet failed\n");
			/* drop skb */
			dev_kfree_skb_any(skb);
			break;
		}
		/* ensure head buffer descriptors are 256 byte aligned */
		offset = 0;
		misalign = paddr & ALIGN_MASK;
		if (misalign) {
			offset = SLIC_RX_BUFF_ALIGN - misalign;
			skb_reserve(skb, offset);
		}
		/* the HW expects dma chunks for descriptor + frame data */
		desc = (struct slic_rx_desc *)skb->data;
		/* temporarily sync descriptor for CPU to clear status */
		dma_sync_single_for_cpu(&sdev->pdev->dev, paddr,
					offset + sizeof(*desc),
					DMA_FROM_DEVICE);
		desc->status = 0;
		/* return it to HW again */
		dma_sync_single_for_device(&sdev->pdev->dev, paddr,
					   offset + sizeof(*desc),
					   DMA_FROM_DEVICE);

		buff = &rxq->rxbuffs[rxq->put_idx];
		buff->skb = skb;
		dma_unmap_addr_set(buff, map_addr, paddr);
		dma_unmap_len_set(buff, map_len, maplen);
		buff->addr_offset = offset;
		/* complete write to descriptor before it is handed to HW */
		wmb();
		/* head buffer descriptors are placed immediately before skb */
		slic_write(sdev, SLIC_REG_HBAR, lower_32_bits(paddr) + offset);
		rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);
	}
}

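/* Decode the per-frame error bits that the hardware reports in the RX
 * header. The Oasis and Mojave chips use different status layouts, so
 * each model has its own decoding path; only the statistics counters are
 * updated here, the erroneous frame itself is dropped by the caller.
 */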
static void slic_handle_frame_error(struct slic_device *sdev,
				    struct sk_buff *skb)
{
	struct slic_stats *stats = &sdev->stats;

	if (sdev->model == SLIC_MODEL_OASIS) {
		struct slic_rx_info_oasis *info;
		u32 status_b;
		u32 status;

		info = (struct slic_rx_info_oasis *)skb->data;
		status = le32_to_cpu(info->frame_status);
		status_b = le32_to_cpu(info->frame_status_b);
		/* transport layer */
		if (status_b & SLIC_VRHSTATB_TPCSUM)
			SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
		if (status & SLIC_VRHSTAT_TPOFLO)
			SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
		if (status_b & SLIC_VRHSTATB_TPHLEN)
			SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
		/* ip layer */
		if (status_b & SLIC_VRHSTATB_IPCSUM)
			SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
		if (status_b & SLIC_VRHSTATB_IPLERR)
			SLIC_INC_STATS_COUNTER(stats, rx_iplen);
		if (status_b & SLIC_VRHSTATB_IPHERR)
			SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
		/* link layer */
		if (status_b & SLIC_VRHSTATB_RCVE)
			SLIC_INC_STATS_COUNTER(stats, rx_early);
		if (status_b & SLIC_VRHSTATB_BUFF)
			SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
		if (status_b & SLIC_VRHSTATB_CODE)
			SLIC_INC_STATS_COUNTER(stats, rx_lcode);
		if (status_b & SLIC_VRHSTATB_DRBL)
			SLIC_INC_STATS_COUNTER(stats, rx_drbl);
		if (status_b & SLIC_VRHSTATB_CRC)
			SLIC_INC_STATS_COUNTER(stats, rx_crc);
		if (status & SLIC_VRHSTAT_802OE)
			SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
		if (status_b & SLIC_VRHSTATB_802UE)
			SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
		if (status_b & SLIC_VRHSTATB_CARRE)
			SLIC_INC_STATS_COUNTER(stats, tx_carrier);
	} else { /* mojave */
		struct slic_rx_info_mojave *info;
		u32 status;

		info = (struct slic_rx_info_mojave *)skb->data;
		status = le32_to_cpu(info->frame_status);
		/* transport layer */
		if (status & SLIC_VGBSTAT_XPERR) {
			u32 xerr = status >> SLIC_VGBSTAT_XERRSHFT;

			if (xerr == SLIC_VGBSTAT_XCSERR)
				SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
			if (xerr == SLIC_VGBSTAT_XUFLOW)
				SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
			if (xerr == SLIC_VGBSTAT_XHLEN)
				SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
		}
		/* ip layer */
		if (status & SLIC_VGBSTAT_NETERR) {
			u32 nerr = status >> SLIC_VGBSTAT_NERRSHFT &
				   SLIC_VGBSTAT_NERRMSK;

			if (nerr == SLIC_VGBSTAT_NCSERR)
				SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
			if (nerr == SLIC_VGBSTAT_NUFLOW)
				SLIC_INC_STATS_COUNTER(stats, rx_iplen);
			if (nerr == SLIC_VGBSTAT_NHLEN)
				SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
		}
		/* link layer */
		if (status & SLIC_VGBSTAT_LNKERR) {
			u32 lerr = status & SLIC_VGBSTAT_LERRMSK;

			if (lerr == SLIC_VGBSTAT_LDEARLY)
				SLIC_INC_STATS_COUNTER(stats, rx_early);
			if (lerr == SLIC_VGBSTAT_LBOFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
			if (lerr == SLIC_VGBSTAT_LCODERR)
				SLIC_INC_STATS_COUNTER(stats, rx_lcode);
			if (lerr == SLIC_VGBSTAT_LDBLNBL)
				SLIC_INC_STATS_COUNTER(stats, rx_drbl);
			if (lerr == SLIC_VGBSTAT_LCRCERR)
				SLIC_INC_STATS_COUNTER(stats, rx_crc);
			if (lerr == SLIC_VGBSTAT_LOFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
			if (lerr == SLIC_VGBSTAT_LUFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
		}
	}
	SLIC_INC_STATS_COUNTER(stats, rx_errors);
}

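/* Pass up to @todo received frames to the stack. For each filled buffer
 * the descriptor header is synced to the CPU first; if its valid bit is
 * not yet set, the header is handed back to the device and processing
 * stops. Good frames are fed into GRO, erroneous ones only update the
 * statistics counters. Finally the ring is replenished.
 */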
static void slic_handle_receive(struct slic_device *sdev, unsigned int todo,
				unsigned int *done)
{
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct net_device *dev = sdev->netdev;
	struct slic_rx_buffer *buff;
	struct slic_rx_desc *desc;
	unsigned int frames = 0;
	unsigned int bytes = 0;
	struct sk_buff *skb;
	u32 status;
	u32 len;

	while (todo && (rxq->done_idx != rxq->put_idx)) {
		buff = &rxq->rxbuffs[rxq->done_idx];

		skb = buff->skb;
		if (!skb)
			break;

		desc = (struct slic_rx_desc *)skb->data;

		dma_sync_single_for_cpu(&sdev->pdev->dev,
					dma_unmap_addr(buff, map_addr),
					buff->addr_offset + sizeof(*desc),
					DMA_FROM_DEVICE);

		status = le32_to_cpu(desc->status);
		if (!(status & SLIC_IRHDDR_SVALID)) {
			dma_sync_single_for_device(&sdev->pdev->dev,
						   dma_unmap_addr(buff,
								  map_addr),
						   buff->addr_offset +
						   sizeof(*desc),
						   DMA_FROM_DEVICE);
			break;
		}

		buff->skb = NULL;

		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len),
				 DMA_FROM_DEVICE);

		/* skip rx descriptor that is placed before the frame data */
		skb_reserve(skb, SLIC_RX_BUFF_HDR_SIZE);

		if (unlikely(status & SLIC_IRHDDR_ERR)) {
			slic_handle_frame_error(sdev, skb);
			dev_kfree_skb_any(skb);
		} else {
			struct ethhdr *eh = (struct ethhdr *)skb->data;

			if (is_multicast_ether_addr(eh->h_dest))
				SLIC_INC_STATS_COUNTER(&sdev->stats, rx_mcasts);

			len = le32_to_cpu(desc->length) & SLIC_IRHDDR_FLEN_MSK;
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&sdev->napi, skb);

			bytes += len;
			frames++;
		}
		rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);
		todo--;
	}

	u64_stats_update_begin(&sdev->stats.syncp);
	sdev->stats.rx_bytes += bytes;
	sdev->stats.rx_packets += frames;
	u64_stats_update_end(&sdev->stats.syncp);

	slic_refill_rx_queue(sdev, GFP_ATOMIC);
}

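/* Link state is not polled: a SLIC_UPR_LSTAT request asks the card to
 * write its link status into shared memory and raise an interrupt, after
 * which the MAC is reprogrammed here to match the reported speed and
 * duplex settings.
 */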
static void slic_handle_link_irq(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	unsigned int duplex;
	int speed;
	u32 link;

	link = le32_to_cpu(sm_data->link);

	if (link & SLIC_GIG_LINKUP) {
		if (link & SLIC_GIG_SPEED_1000)
			speed = SPEED_1000;
		else if (link & SLIC_GIG_SPEED_100)
			speed = SPEED_100;
		else
			speed = SPEED_10;

		duplex = (link & SLIC_GIG_FULLDUPLEX) ? DUPLEX_FULL :
							DUPLEX_HALF;
	} else {
		duplex = DUPLEX_UNKNOWN;
		speed = SPEED_UNKNOWN;
	}
	slic_configure_link(sdev, speed, duplex);
}

static void slic_handle_upr_irq(struct slic_device *sdev, u32 irqs)
{
	struct slic_upr *upr;

	/* remove upr that caused this irq (always the first entry in list) */
	upr = slic_dequeue_upr(sdev);
	if (!upr) {
		netdev_warn(sdev->netdev, "no upr found on list\n");
		return;
	}

	if (upr->type == SLIC_UPR_LSTAT) {
		if (unlikely(irqs & SLIC_ISR_UPCERR_MASK)) {
			/* try again */
			slic_queue_upr(sdev, upr);
			return;
		}
		slic_handle_link_irq(sdev);
	}
	kfree(upr);
}

static int slic_handle_link_change(struct slic_device *sdev)
{
	return slic_new_upr(sdev, SLIC_UPR_LSTAT, sdev->shmem.link_paddr);
}

static void slic_handle_err_irq(struct slic_device *sdev, u32 isr)
{
	struct slic_stats *stats = &sdev->stats;

	if (isr & SLIC_ISR_RMISS)
		SLIC_INC_STATS_COUNTER(stats, rx_buff_miss);
	if (isr & SLIC_ISR_XDROP)
		SLIC_INC_STATS_COUNTER(stats, tx_dropped);
	if (!(isr & (SLIC_ISR_RMISS | SLIC_ISR_XDROP)))
		SLIC_INC_STATS_COUNTER(stats, irq_errs);
}

static void slic_handle_irq(struct slic_device *sdev, u32 isr,
			    unsigned int todo, unsigned int *done)
{
	if (isr & SLIC_ISR_ERR)
		slic_handle_err_irq(sdev, isr);

	if (isr & SLIC_ISR_LEVENT)
		slic_handle_link_change(sdev);

	if (isr & SLIC_ISR_UPC_MASK)
		slic_handle_upr_irq(sdev, isr);

	if (isr & SLIC_ISR_RCV)
		slic_handle_receive(sdev, todo, done);

	if (isr & SLIC_ISR_CMD)
		slic_xmit_complete(sdev);
}

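/* NAPI poll handler: the interrupt cause is read from the shared memory
 * area that the card updates, not from a register. Once all pending work
 * fits within the budget, the shadow ISR is cleared and interrupts are
 * re-enabled by writing 0 to SLIC_REG_ISR.
 */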
static int slic_poll(struct napi_struct *napi, int todo)
{
	struct slic_device *sdev = container_of(napi, struct slic_device, napi);
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	u32 isr = le32_to_cpu(sm_data->isr);
	int done = 0;

	slic_handle_irq(sdev, isr, todo, &done);

	if (done < todo) {
		napi_complete_done(napi, done);
		/* reenable irqs */
		sm_data->isr = 0;
		/* make sure sm_data->isr is cleared before irqs are reenabled */
		wmb();
		slic_write(sdev, SLIC_REG_ISR, 0);
		slic_flush_write(sdev);
	}

	return done;
}

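/* Hard interrupt handler: interrupts are masked via SLIC_REG_ICR first.
 * A zero shadow ISR in shared memory indicates a spurious interrupt, in
 * which case interrupts are unmasked again and IRQ_NONE is returned;
 * otherwise the actual work is deferred to the NAPI poll routine.
 */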
static irqreturn_t slic_irq(int irq, void *dev_id)
{
	struct slic_device *sdev = dev_id;
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_MASK);
	slic_flush_write(sdev);
	/* make sure sm_data->isr is read after ICR_INT_MASK is set */
	wmb();

	if (!sm_data->isr) {
		dma_rmb();
		/* spurious interrupt */
		slic_write(sdev, SLIC_REG_ISR, 0);
		slic_flush_write(sdev);
		return IRQ_NONE;
	}

	napi_schedule_irqoff(&sdev->napi);

	return IRQ_HANDLED;
}

static void slic_card_reset(struct slic_device *sdev)
{
	u16 cmd;

	slic_write(sdev, SLIC_REG_RESET, SLIC_RESET_MAGIC);
	/* flush write by means of config space */
	pci_read_config_word(sdev->pdev, PCI_COMMAND, &cmd);
	mdelay(1);
}

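/* Allocate the status (completion) queue. The descriptor arrays must be
 * aligned to SLIC_STATS_DESC_ALIGN, so each coherent allocation is padded
 * by one alignment unit and the start address adjusted by hand; the
 * applied offset is recorded per array so that the original allocation
 * address can be reconstructed when freeing.
 */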
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) static int slic_init_stat_queue(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) const unsigned int DESC_ALIGN_MASK = SLIC_STATS_DESC_ALIGN - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct slic_stat_queue *stq = &sdev->stq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct slic_stat_desc *descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) unsigned int misalign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) dma_addr_t paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) stq->len = SLIC_NUM_STAT_DESCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) stq->active_array = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) stq->done_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!descs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) netdev_err(sdev->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) "failed to allocate status descriptors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) goto free_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /* ensure correct alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) misalign = paddr & DESC_ALIGN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (misalign) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) offset = SLIC_STATS_DESC_ALIGN - misalign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) descs += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) paddr += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) stq->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) stq->descs[i] = descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) stq->paddr[i] = paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) stq->addr_offset[i] = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) stq->mem_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) free_descs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) dma_free_coherent(&sdev->pdev->dev, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) (void *)stq->descs[i] - stq->addr_offset[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) stq->paddr[i] - stq->addr_offset[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) static void slic_free_stat_queue(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct slic_stat_queue *stq = &sdev->stq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) (void *)stq->descs[i] - stq->addr_offset[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) stq->paddr[i] - stq->addr_offset[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
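/* Transmit descriptors are small fixed-size coherent buffers, one per
 * ring slot, so a DMA pool is a natural fit: dma_pool_create(name, dev,
 * size, align, boundary) hands out blocks of the given size that are
 * SLIC_TX_DESC_ALIGN aligned, and the boundary argument of 4096
 * guarantees that no descriptor straddles a 4 KiB boundary (presumably
 * a hardware restriction, judging from the chosen value).
 */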
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static int slic_init_tx_queue(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct slic_tx_queue *txq = &sdev->txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct slic_tx_buffer *buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct slic_tx_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) txq->len = SLIC_NUM_TX_DESCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) txq->put_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) txq->done_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) txq->txbuffs = kcalloc(txq->len, sizeof(*buff), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (!txq->txbuffs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) txq->dma_pool = dma_pool_create("slic_pool", &sdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) sizeof(*desc), SLIC_TX_DESC_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (!txq->dma_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) netdev_err(sdev->netdev, "failed to create dma pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) goto free_buffs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) for (i = 0; i < txq->len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) buff = &txq->txbuffs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) desc = dma_pool_zalloc(txq->dma_pool, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) &buff->desc_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) netdev_err(sdev->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) "failed to alloc pool chunk (%i)\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) goto free_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) desc->hnd = cpu_to_le32((u32)(i + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) desc->cmd = SLIC_CMD_XMT_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) desc->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) desc->type = cpu_to_le32(SLIC_CMD_TYPE_DUMB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) buff->desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) free_descs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) buff = &txq->txbuffs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) dma_pool_destroy(txq->dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) free_buffs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) kfree(txq->txbuffs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) static void slic_free_tx_queue(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct slic_tx_queue *txq = &sdev->txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct slic_tx_buffer *buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) for (i = 0; i < txq->len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) buff = &txq->txbuffs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (!buff->skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) dma_unmap_single(&sdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) dma_unmap_addr(buff, map_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) dma_unmap_len(buff, map_len), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) consume_skb(buff->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) dma_pool_destroy(txq->dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) kfree(txq->txbuffs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static int slic_init_rx_queue(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct slic_rx_queue *rxq = &sdev->rxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct slic_rx_buffer *buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) rxq->len = SLIC_NUM_RX_LES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) rxq->done_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) rxq->put_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) buff = kcalloc(rxq->len, sizeof(*buff), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (!buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) rxq->rxbuffs = buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) slic_refill_rx_queue(sdev, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static void slic_free_rx_queue(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct slic_rx_queue *rxq = &sdev->rxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct slic_rx_buffer *buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* free rx buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) for (i = 0; i < rxq->len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) buff = &rxq->rxbuffs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (!buff->skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) dma_unmap_single(&sdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dma_unmap_addr(buff, map_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) dma_unmap_len(buff, map_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) consume_skb(buff->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) kfree(rxq->rxbuffs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
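/* PHY configuration goes through a single mailbox register: judging
 * from the writes below, SLIC_REG_WPHY takes the MII register number in
 * the upper 16 bits and the 16 bit register value in the lower half.
 * Resetting the PHY and restarting autonegotiation is then one write:
 *
 *	slic_write(sdev, SLIC_REG_WPHY,
 *		   MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
 *		   BMCR_ANRESTART);
 */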
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static void slic_set_link_autoneg(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) unsigned int subid = sdev->pdev->subsystem_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (sdev->is_fiber) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* We've got a fiber gigabit interface, and register 4 is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * different in fiber mode than in copper mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* advertise FD only @1000 Mb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) val = MII_ADVERTISE << 16 | ADVERTISE_1000XFULL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /* enable PAUSE frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) slic_write(sdev, SLIC_REG_WPHY, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /* reset phy, enable auto-neg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) BMCR_ANRESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) slic_write(sdev, SLIC_REG_WPHY, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) } else { /* copper gigabit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /* We've got a copper gigabit interface, and register 4 is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * different in copper mode than in fiber mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /* advertise 10/100 Mb modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) val = MII_ADVERTISE << 16 | ADVERTISE_100FULL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /* enable PAUSE frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) val |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* required by the Cicada PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) val |= ADVERTISE_CSMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) slic_write(sdev, SLIC_REG_WPHY, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* advertise FD only @1000 Mb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) val = MII_CTRL1000 << 16 | ADVERTISE_1000FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) slic_write(sdev, SLIC_REG_WPHY, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (subid != PCI_SUBDEVICE_ID_ALACRITECH_CICADA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* if a Marvell PHY enable auto crossover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) val = SLIC_MIICR_REG_16 | SLIC_MRV_REG16_XOVERON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) slic_write(sdev, SLIC_REG_WPHY, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /* reset phy, enable auto-neg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) BMCR_ANRESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) slic_write(sdev, SLIC_REG_WPHY, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /* enable and restart auto-neg (don't reset) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) val = MII_BMCR << 16 | BMCR_ANENABLE | BMCR_ANRESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) slic_write(sdev, SLIC_REG_WPHY, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
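/* The MAC address is programmed as two register writes: bytes 2..5 form
 * the low word and bytes 0..1 the high word, each written to both the A
 * and B address banks (presumably two hardware filter slots). For
 * 00:11:22:33:44:55 the low word is 0x22334455, the high word 0x0011.
 */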
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static void slic_set_mac_address(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) u8 *addr = sdev->netdev->dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) slic_write(sdev, SLIC_REG_WRADDRAL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) slic_write(sdev, SLIC_REG_WRADDRBL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) val = addr[0] << 8 | addr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) slic_write(sdev, SLIC_REG_WRADDRAH, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) slic_write(sdev, SLIC_REG_WRADDRBH, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) slic_flush_write(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static u32 slic_read_dword_from_firmware(const struct firmware *fw, int *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) __le32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) memcpy(&val, fw->data + *offset, sizeof(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) *offset += sizeof(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return le32_to_cpu(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_MOJAVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_OASIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
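/* Layout of the receive sequencer firmware image, as implied by the
 * parser below (all fields little endian):
 *
 *	u32 codelen;		number of instructions
 *	per instruction:
 *		u32 low;	low part of the instruction word
 *		u8 high;	high part of the instruction word
 *
 * Each instruction is pushed to the card as three writes to
 * SLIC_REG_RCV_WCS: the instruction address, the low dword and the
 * high byte.
 */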
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static int slic_load_rcvseq_firmware(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) const struct firmware *fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) const char *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) u32 codelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) u32 instr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_RCV_FIRMWARE_OASIS :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) SLIC_RCV_FIRMWARE_MOJAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) err = request_firmware(&fw, file, &sdev->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) dev_err(&sdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) "failed to load receive sequencer firmware %s\n", file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Do an initial sanity check concerning firmware size now. A further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * check follows below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) dev_err(&sdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) "invalid firmware size %zu (min %u expected)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) fw->size, SLIC_FIRMWARE_MIN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) codelen = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /* another sanity check: the image holds five bytes per instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (codelen > (fw->size - 4) / 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) dev_err(&sdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) "invalid rcv-sequencer firmware size %zu\n", fw->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /* download sequencer code to card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) for (addr = 0; addr < codelen; addr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /* write out instruction address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) slic_write(sdev, SLIC_REG_RCV_WCS, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) instr = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* write out the instruction data low addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) slic_write(sdev, SLIC_REG_RCV_WCS, instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* the high part is a single byte, no endianness conversion needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) instr = fw->data[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /* write out the instruction data high part */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) slic_write(sdev, SLIC_REG_RCV_WCS, instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* finish download */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) slic_flush_write(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) MODULE_FIRMWARE(SLIC_FIRMWARE_MOJAVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) MODULE_FIRMWARE(SLIC_FIRMWARE_OASIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
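/* Layout of the main firmware image, as implied by the parser below
 * (all fields little endian):
 *
 *	u32 numsects;			number of code sections
 *	u32 sectsize[numsects];		section sizes in bytes
 *	u32 sectstart[numsects];	section load addresses
 *	instruction stream		two dwords per instruction
 *
 * The stream is walked twice: a first pass downloads all sections via
 * SLIC_REG_WCS, a second pass replays sections loaded at or above
 * address 0x8000 with SLIC_WCS_COMPARE set, apparently letting the card
 * verify what was written.
 */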
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static int slic_load_firmware(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) u32 sectstart[SLIC_FIRMWARE_MAX_SECTIONS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) u32 sectsize[SLIC_FIRMWARE_MAX_SECTIONS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) const struct firmware *fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) unsigned int datalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) const char *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) int code_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) u32 numsects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) u32 sect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) u32 instr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) u32 base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_FIRMWARE_OASIS :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) SLIC_FIRMWARE_MOJAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) err = request_firmware(&fw, file, &sdev->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dev_err(&sdev->pdev->dev, "failed to load firmware %s\n", file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /* Do an initial sanity check concerning firmware size now. A further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * check follows below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) dev_err(&sdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) "invalid firmware size %zu (min is %u)\n", fw->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) SLIC_FIRMWARE_MIN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) numsects = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (numsects == 0 || numsects > SLIC_FIRMWARE_MAX_SECTIONS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dev_err(&sdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) "invalid number of sections in firmware: %u", numsects);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) datalen = numsects * 8 + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) for (i = 0; i < numsects; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) sectsize[i] = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) datalen += sectsize[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /* another sanity check; the parser reads one dword ahead of the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (datalen > fw->size - 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) dev_err(&sdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) "invalid firmware size %zu (expected >= %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) fw->size, datalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* get sections */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) for (i = 0; i < numsects; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) sectstart[i] = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) code_start = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) instr = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) for (sect = 0; sect < numsects; sect++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) unsigned int ssize = sectsize[sect] >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) base = sectstart[sect];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) for (addr = 0; addr < ssize; addr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /* write out instruction address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) slic_write(sdev, SLIC_REG_WCS, base + addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /* write out instruction to low addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) slic_write(sdev, SLIC_REG_WCS, instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) instr = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /* write out instruction to high addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) slic_write(sdev, SLIC_REG_WCS, instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) instr = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) idx = code_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) for (sect = 0; sect < numsects; sect++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) unsigned int ssize = sectsize[sect] >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) instr = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) base = sectstart[sect];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (base < 0x8000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) for (addr = 0; addr < ssize; addr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* write out instruction address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) slic_write(sdev, SLIC_REG_WCS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) SLIC_WCS_COMPARE | (base + addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /* write out instruction to low addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) slic_write(sdev, SLIC_REG_WCS, instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) instr = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /* write out instruction to high addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) slic_write(sdev, SLIC_REG_WCS, instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) instr = slic_read_dword_from_firmware(fw, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) slic_flush_write(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /* everything OK, kick off the card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) slic_write(sdev, SLIC_REG_WCS, SLIC_WCS_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) slic_flush_write(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /* wait long enough for ucode to init card and reach the mainloop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) mdelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static int slic_init_shmem(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct slic_shmem *sm = &sdev->shmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct slic_shmem_data *sm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) dma_addr_t paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) &paddr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (!sm_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) sm->shmem_data = sm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) sm->isr_paddr = paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) sm->link_paddr = paddr + offsetof(struct slic_shmem_data, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) static void slic_free_shmem(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct slic_shmem *sm = &sdev->shmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) struct slic_shmem_data *sm_data = sm->shmem_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) dma_free_coherent(&sdev->pdev->dev, sizeof(*sm_data), sm_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) sm->isr_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
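/* Bring the interface up in a fixed order: shared memory, firmware,
 * receive sequencer, rx/tx/status queues, then interrupts. The error
 * labels unwind in exactly the reverse order and finish with a card
 * reset, so a failed open leaves the hardware in a known state.
 */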
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static int slic_init_iface(struct slic_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct slic_shmem *sm = &sdev->shmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) sdev->upr_list.pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) err = slic_init_shmem(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) netdev_err(sdev->netdev, "failed to init shared memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) err = slic_load_firmware(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) netdev_err(sdev->netdev, "failed to load firmware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) goto free_sm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) err = slic_load_rcvseq_firmware(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) netdev_err(sdev->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) "failed to load firmware for receive sequencer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) goto free_sm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) slic_flush_write(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) err = slic_init_rx_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) netdev_err(sdev->netdev, "failed to init rx queue: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) goto free_sm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) err = slic_init_tx_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) netdev_err(sdev->netdev, "failed to init tx queue: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) goto free_rxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) err = slic_init_stat_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) netdev_err(sdev->netdev, "failed to init status queue: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) goto free_txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) napi_enable(&sdev->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /* disable irq mitigation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) slic_write(sdev, SLIC_REG_INTAGG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) slic_write(sdev, SLIC_REG_ISR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) slic_flush_write(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) slic_set_mac_address(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) spin_lock_bh(&sdev->link_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) sdev->duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) sdev->speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) spin_unlock_bh(&sdev->link_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) slic_set_link_autoneg(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) err = request_irq(sdev->pdev->irq, slic_irq, IRQF_SHARED, DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) netdev_err(sdev->netdev, "failed to request irq: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) goto disable_napi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) slic_flush_write(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* request initial link status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) err = slic_handle_link_change(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) netdev_warn(sdev->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) "failed to set initial link state: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) disable_napi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) napi_disable(&sdev->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) slic_free_stat_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) free_txq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) slic_free_tx_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) free_rxq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) slic_free_rx_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) free_sm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) slic_free_shmem(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) slic_card_reset(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static int slic_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct slic_device *sdev = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) err = slic_init_iface(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) netdev_err(dev, "failed to initialize interface: %i\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) static int slic_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct slic_device *sdev = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /* stop irq handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) napi_disable(&sdev->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) slic_write(sdev, SLIC_REG_ISR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) slic_flush_write(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) free_irq(sdev->pdev->irq, sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* turn off RCV and XMT and power down PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) val = SLIC_GXCR_RESET | SLIC_GXCR_PAUSEEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) slic_write(sdev, SLIC_REG_WXCFG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) val = SLIC_GRCR_RESET | SLIC_GRCR_CTLEN | SLIC_GRCR_ADDRAEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) slic_write(sdev, SLIC_REG_WRCFG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) val = MII_BMCR << 16 | BMCR_PDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) slic_write(sdev, SLIC_REG_WPHY, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) slic_flush_write(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) slic_clear_upr_list(&sdev->upr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) slic_write(sdev, SLIC_REG_QUIESCE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) slic_free_stat_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) slic_free_tx_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) slic_free_rx_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) slic_free_shmem(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) slic_card_reset(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
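/* Transmit fast path: map the linear skb data, fill in the slot's
 * preallocated descriptor and ring the doorbell by writing the
 * descriptor's bus address to SLIC_REG_CBAR. The low bit or'ed into the
 * doorbell value presumably tags the write as valid (only the constant
 * 1 is ever used here). The queue is stopped as soon as fewer than
 * SLIC_MAX_REQ_TX_DESCS slots remain, so a request never finds the
 * ring full.
 */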
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct slic_device *sdev = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) struct slic_tx_queue *txq = &sdev->txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct slic_tx_buffer *buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct slic_tx_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) dma_addr_t paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) u32 cbar_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) u32 maplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (unlikely(slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) netdev_err(dev, "BUG! not enough tx LEs left: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) slic_get_free_tx_descs(txq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) maplen = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) netdev_err(dev, "failed to map tx buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) goto drop_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) buff = &txq->txbuffs[txq->put_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) buff->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) dma_unmap_addr_set(buff, map_addr, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) dma_unmap_len_set(buff, map_len, maplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) desc = buff->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) desc->totlen = cpu_to_le32(maplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) desc->paddrl = cpu_to_le32(lower_32_bits(paddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) desc->paddrh = cpu_to_le32(upper_32_bits(paddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) desc->len = cpu_to_le32(maplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) txq->put_idx = slic_next_queue_idx(txq->put_idx, txq->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) cbar_val = lower_32_bits(buff->desc_paddr) | 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /* complete writes to RAM and DMA before hardware is informed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) slic_write(sdev, SLIC_REG_CBAR, cbar_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) drop_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static void slic_get_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct rtnl_link_stats64 *lst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct slic_device *sdev = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct slic_stats *stats = &sdev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) SLIC_GET_STATS_COUNTER(lst->rx_packets, stats, rx_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) SLIC_GET_STATS_COUNTER(lst->tx_packets, stats, tx_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) SLIC_GET_STATS_COUNTER(lst->rx_bytes, stats, rx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) SLIC_GET_STATS_COUNTER(lst->tx_bytes, stats, tx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) SLIC_GET_STATS_COUNTER(lst->rx_errors, stats, rx_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) SLIC_GET_STATS_COUNTER(lst->rx_dropped, stats, rx_buff_miss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) SLIC_GET_STATS_COUNTER(lst->tx_dropped, stats, tx_dropped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) SLIC_GET_STATS_COUNTER(lst->multicast, stats, rx_mcasts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) SLIC_GET_STATS_COUNTER(lst->rx_over_errors, stats, rx_buffoflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) static int slic_get_sset_count(struct net_device *dev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return ARRAY_SIZE(slic_stats_strings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static void slic_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct ethtool_stats *eth_stats, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct slic_device *sdev = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct slic_stats *stats = &sdev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) SLIC_GET_STATS_COUNTER(data[0], stats, rx_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) SLIC_GET_STATS_COUNTER(data[1], stats, rx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) SLIC_GET_STATS_COUNTER(data[2], stats, rx_mcasts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) SLIC_GET_STATS_COUNTER(data[3], stats, rx_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) SLIC_GET_STATS_COUNTER(data[4], stats, rx_buff_miss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) SLIC_GET_STATS_COUNTER(data[5], stats, rx_tpcsum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) SLIC_GET_STATS_COUNTER(data[6], stats, rx_tpoflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) SLIC_GET_STATS_COUNTER(data[7], stats, rx_tphlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) SLIC_GET_STATS_COUNTER(data[8], stats, rx_ipcsum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) SLIC_GET_STATS_COUNTER(data[9], stats, rx_iplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) SLIC_GET_STATS_COUNTER(data[10], stats, rx_iphlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) SLIC_GET_STATS_COUNTER(data[11], stats, rx_early);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) SLIC_GET_STATS_COUNTER(data[12], stats, rx_buffoflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) SLIC_GET_STATS_COUNTER(data[13], stats, rx_lcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) SLIC_GET_STATS_COUNTER(data[14], stats, rx_drbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) SLIC_GET_STATS_COUNTER(data[15], stats, rx_crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) SLIC_GET_STATS_COUNTER(data[16], stats, rx_oflow802);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) SLIC_GET_STATS_COUNTER(data[17], stats, rx_uflow802);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) SLIC_GET_STATS_COUNTER(data[18], stats, tx_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) SLIC_GET_STATS_COUNTER(data[19], stats, tx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) SLIC_GET_STATS_COUNTER(data[20], stats, tx_carrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) SLIC_GET_STATS_COUNTER(data[21], stats, tx_dropped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) SLIC_GET_STATS_COUNTER(data[22], stats, irq_errs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static void slic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (stringset == ETH_SS_STATS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) memcpy(data, slic_stats_strings, sizeof(slic_stats_strings));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) data += sizeof(slic_stats_strings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static void slic_get_drvinfo(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct slic_device *sdev = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) strscpy(info->driver, DRV_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) strscpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static const struct ethtool_ops slic_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) .get_drvinfo = slic_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) .get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) .get_strings = slic_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) .get_ethtool_stats = slic_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) .get_sset_count = slic_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) static const struct net_device_ops slic_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) .ndo_open = slic_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) .ndo_stop = slic_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) .ndo_start_xmit = slic_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) .ndo_set_mac_address = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) .ndo_get_stats64 = slic_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) .ndo_set_rx_mode = slic_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
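/* The EEPROM checksum is the classic 16 bit ones' complement sum also
 * used by the Internet checksum (RFC 1071): sum the data as little
 * endian 16 bit words, add a trailing odd byte as-is, fold the carries
 * back into the low 16 bits and complement the result. For the two
 * words 0xfffe and 0x0003:
 *
 *	0xfffe + 0x0003 = 0x10001
 *	fold:  0x0001 + 0x0001 = 0x0002
 *	~0x0002 = 0xfffd
 */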
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) static u16 slic_eeprom_csum(unsigned char *eeprom, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) unsigned char *ptr = eeprom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) u32 csum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) __le16 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) while (len > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) memcpy(&data, ptr, sizeof(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) csum += le16_to_cpu(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) ptr += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) len -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) csum += *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) while (csum >> 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) csum = (csum & 0xFFFF) + ((csum >> 16) & 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return ~csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /* check eeprom size, magic and checksum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static bool slic_eeprom_valid(unsigned char *eeprom, unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) const unsigned int MAX_SIZE = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) const unsigned int MIN_SIZE = 98;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) __le16 magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) __le16 csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (size < MIN_SIZE || size > MAX_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) memcpy(&magic, eeprom, sizeof(magic));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (le16_to_cpu(magic) != SLIC_EEPROM_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) /* cut checksum bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) size -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) memcpy(&csum, eeprom + size, sizeof(csum));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) return (le16_to_cpu(csum) == slic_eeprom_csum(eeprom, size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
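/* The EEPROM content is fetched via a utility processor request (UPR):
 * the driver posts a SLIC_UPR_CONFIG request carrying a DMA address,
 * the card copies its configuration data there and signals completion
 * by setting SLIC_ISR_UPC in the shared memory ISR word, which the code
 * below polls for up to MAX_LOOPS milliseconds.
 */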
static int slic_read_eeprom(struct slic_device *sdev)
{
	unsigned int devfn = PCI_FUNC(sdev->pdev->devfn);
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	const unsigned int MAX_LOOPS = 5000;
	unsigned int codesize;
	unsigned char *eeprom;
	struct slic_upr *upr;
	unsigned int i = 0;
	dma_addr_t paddr;
	int err = 0;
	u8 *mac[2];

	eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
				    &paddr, GFP_KERNEL);
	if (!eeprom)
		return -ENOMEM;

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
	/* setup ISP temporarily so that the card can report completion of
	 * the request in the shared memory ISR copy
	 */
	slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));

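	/* with interrupts turned off above, completion is detected by
	 * polling the ISR copy in shared memory; MAX_LOOPS iterations of
	 * mdelay(1) bound the wait to roughly five seconds
	 */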
	err = slic_new_upr(sdev, SLIC_UPR_CONFIG, paddr);
	if (!err) {
		for (i = 0; i < MAX_LOOPS; i++) {
			if (le32_to_cpu(sm_data->isr) & SLIC_ISR_UPC)
				break;
			mdelay(1);
		}
		if (i == MAX_LOOPS) {
			dev_err(&sdev->pdev->dev,
				"timed out while waiting for eeprom data\n");
			err = -ETIMEDOUT;
		}
		upr = slic_dequeue_upr(sdev);
		kfree(upr);
	}

	slic_write(sdev, SLIC_REG_ISP, 0);
	slic_write(sdev, SLIC_REG_ISR, 0);
	slic_flush_write(sdev);

	if (err)
		goto free_eeprom;

	if (sdev->model == SLIC_MODEL_OASIS) {
		struct slic_oasis_eeprom *oee;

		oee = (struct slic_oasis_eeprom *)eeprom;
		mac[0] = oee->mac;
		mac[1] = oee->mac2;
		codesize = le16_to_cpu(oee->eeprom_code_size);
	} else {
		struct slic_mojave_eeprom *mee;

		mee = (struct slic_mojave_eeprom *)eeprom;
		mac[0] = mee->mac;
		mac[1] = mee->mac2;
		codesize = le16_to_cpu(mee->eeprom_code_size);
	}

	if (!slic_eeprom_valid(eeprom, codesize)) {
		dev_err(&sdev->pdev->dev, "invalid checksum in eeprom\n");
		err = -EINVAL;
		goto free_eeprom;
	}
	/* each EEPROM holds two MAC addresses, one per PCI function */
	ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]);
free_eeprom:
	dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);

	return err;
}

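/* one-time device initialization: reset the card, load its firmware and
 * retrieve the MAC address from the EEPROM; the shared memory is only
 * needed temporarily for the EEPROM read and is freed again afterwards
 */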
static int slic_init(struct slic_device *sdev)
{
	int err;

	spin_lock_init(&sdev->upper_lock);
	spin_lock_init(&sdev->link_lock);
	INIT_LIST_HEAD(&sdev->upr_list.list);
	spin_lock_init(&sdev->upr_list.lock);
	u64_stats_init(&sdev->stats.syncp);

	slic_card_reset(sdev);

	err = slic_load_firmware(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to load firmware\n");
		return err;
	}

	/* we need the shared memory to read the EEPROM, so set it up
	 * temporarily
	 */
	err = slic_init_shmem(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
		return err;
	}

	err = slic_read_eeprom(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to read eeprom\n");
		goto free_sm;
	}

	/* reset the card again before the temporary shared memory is
	 * released
	 */
	slic_card_reset(sdev);
	slic_free_shmem(sdev);

	return 0;
free_sm:
	slic_free_shmem(sdev);

	return err;
}

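/* determine from the PCI subsystem device id whether this is a fiber
 * card; all other subsystem ids are treated as copper
 */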
static bool slic_is_fiber(unsigned short subdev)
{
	switch (subdev) {
	/* Mojave */
	case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F:
	case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F:
	/* Oasis */
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF:
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF:
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF:
	case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF:
		return true;
	}
	return false;
}

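/* enable reporting of PCI parity errors and system errors, in addition
 * to whatever the firmware has already configured
 */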
static void slic_configure_pci(struct pci_dev *pdev)
{
	u16 old;
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &old);

	cmd = old | PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	if (old != cmd)
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
}

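/* bring up a single adapter: enable the PCI device, configure DMA for
 * 32 bit addressing, map BAR 0, allocate the net_device with the
 * private slic_device embedded, run the one-time hardware init and
 * finally register the net_device; the data path itself is only set up
 * later, when the interface is opened
 */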
static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct slic_device *sdev;
	struct net_device *dev;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return err;
	}

	pci_set_master(pdev);
	/* memory write invalidate is merely an optimization, so a failure
	 * to enable it is not an error
	 */
	pci_try_set_mwi(pdev);

	slic_configure_pci(pdev);

	/* use 32 bit DMA addressing throughout, for both streaming and
	 * coherent mappings
	 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "failed to setup DMA\n");
		goto disable;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "failed to obtain PCI regions\n");
		goto disable;
	}

	dev = alloc_etherdev(sizeof(*sdev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to alloc ethernet device\n");
		err = -ENOMEM;
		goto free_regions;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);
	dev->irq = pdev->irq;
	dev->netdev_ops = &slic_netdev_ops;
	dev->hw_features = NETIF_F_RXCSUM;
	dev->features |= dev->hw_features;

	dev->ethtool_ops = &slic_ethtool_ops;

	sdev = netdev_priv(dev);
	sdev->model = (pdev->device == PCI_DEVICE_ID_ALACRITECH_OASIS) ?
		      SLIC_MODEL_OASIS : SLIC_MODEL_MOJAVE;
	sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
	sdev->pdev = pdev;
	sdev->netdev = dev;
	sdev->regs = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!sdev->regs) {
		dev_err(&pdev->dev, "failed to map registers\n");
		err = -ENOMEM;
		goto free_netdev;
	}

	err = slic_init(sdev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize driver\n");
		goto unmap;
	}

	netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device: %i\n", err);
		goto unmap;
	}

	return 0;

unmap:
	iounmap(sdev->regs);
free_netdev:
	free_netdev(dev);
free_regions:
	pci_release_regions(pdev);
disable:
	pci_disable_device(pdev);

	return err;
}

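/* tear down an adapter: unregister the net_device first so that nothing
 * can reach the hardware anymore, then release the resources in the
 * reverse order of their acquisition in slic_probe()
 */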
static void slic_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct slic_device *sdev = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(sdev->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver slic_driver = {
	.name = DRV_NAME,
	.id_table = slic_id_tbl,
	.probe = slic_probe,
	.remove = slic_remove,
};

module_pci_driver(slic_driver);

MODULE_DESCRIPTION("Alacritech non-accelerated SLIC driver");
MODULE_AUTHOR("Lino Sanfilippo <LinoSanfilippo@gmx.de>");
MODULE_LICENSE("GPL");