/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

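/* Release everything attached to one descriptor data entry: unmap the
 * Tx DMA region (page or single mapping), free any attached skb, drop
 * the references to the Rx header/buffer pages (unmapping them when
 * this entry owns the mapping), and reset the entry's saved state.
 */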
static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
				   struct xlgmac_desc_data *desc_data)
{
	if (desc_data->skb_dma) {
		if (desc_data->mapped_as_page) {
			dma_unmap_page(pdata->dev, desc_data->skb_dma,
				       desc_data->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, desc_data->skb_dma,
					 desc_data->skb_dma_len, DMA_TO_DEVICE);
		}
		desc_data->skb_dma = 0;
		desc_data->skb_dma_len = 0;
	}

	if (desc_data->skb) {
		dev_kfree_skb_any(desc_data->skb);
		desc_data->skb = NULL;
	}

	if (desc_data->rx.hdr.pa.pages)
		put_page(desc_data->rx.hdr.pa.pages);

	if (desc_data->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
			       desc_data->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(desc_data->rx.hdr.pa_unmap.pages);
	}

	if (desc_data->rx.buf.pa.pages)
		put_page(desc_data->rx.buf.pa.pages);

	if (desc_data->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
			       desc_data->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(desc_data->rx.buf.pa_unmap.pages);
	}

	memset(&desc_data->tx, 0, sizeof(desc_data->tx));
	memset(&desc_data->rx, 0, sizeof(desc_data->rx));

	desc_data->mapped_as_page = 0;

	if (desc_data->state_saved) {
		desc_data->state_saved = 0;
		desc_data->state.skb = NULL;
		desc_data->state.len = 0;
		desc_data->state.error = 0;
	}
}

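/* Tear down one ring: unmap every descriptor data entry, free the
 * descriptor data array, release the ring's current Rx header/buffer
 * page allocations, and free the coherent descriptor memory.
 */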
static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
			     struct xlgmac_ring *ring)
{
	struct xlgmac_desc_data *desc_data;
	unsigned int i;

	if (!ring)
		return;

	if (ring->desc_data_head) {
		for (i = 0; i < ring->dma_desc_count; i++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, i);
			xlgmac_unmap_desc_data(pdata, desc_data);
		}

		kfree(ring->desc_data_head);
		ring->desc_data_head = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->dma_desc_head) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xlgmac_dma_desc) *
				   ring->dma_desc_count),
				  ring->dma_desc_head,
				  ring->dma_desc_head_addr);
		ring->dma_desc_head = NULL;
	}
}

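/* Allocate the coherent DMA descriptor memory and the descriptor data
 * array for one ring. On -ENOMEM the caller unwinds any partial
 * allocation through xlgmac_free_ring().
 */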
static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
			    struct xlgmac_ring *ring,
			    unsigned int dma_desc_count)
{
	if (!ring)
		return 0;

	/* Descriptors */
	ring->dma_desc_count = dma_desc_count;
	ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
						 (sizeof(struct xlgmac_dma_desc) *
						  dma_desc_count),
						 &ring->dma_desc_head_addr,
						 GFP_KERNEL);
	if (!ring->dma_desc_head)
		return -ENOMEM;

	/* Array of descriptor data */
	ring->desc_data_head = kcalloc(dma_desc_count,
				       sizeof(struct xlgmac_desc_data),
				       GFP_KERNEL);
	if (!ring->desc_data_head)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
		  ring->dma_desc_head,
		  &ring->dma_desc_head_addr,
		  ring->desc_data_head);

	return 0;
}

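/* Free the Tx and Rx rings of every channel. */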
static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (!pdata->channel_head)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		xlgmac_free_ring(pdata, channel->tx_ring);
		xlgmac_free_ring(pdata, channel->rx_ring);
	}
}

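/* Initialize the Tx and Rx rings of every channel; a failure on any
 * ring unwinds all rings allocated so far.
 */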
static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xlgmac_init_ring(pdata, channel->tx_ring,
				       pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_init_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xlgmac_init_ring(pdata, channel->rx_ring,
				       pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_init_ring;
		}
	}

	return 0;

err_init_ring:
	xlgmac_free_rings(pdata);

	return ret;
}

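/* The per-channel tx_ring/rx_ring pointers alias into the two ring
 * arrays hanging off the first channel, so only those head
 * allocations and the channel array itself need to be freed.
 */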
static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
{
	if (!pdata->channel_head)
		return;

	kfree(pdata->channel_head->tx_ring);
	pdata->channel_head->tx_ring = NULL;

	kfree(pdata->channel_head->rx_ring);
	pdata->channel_head->rx_ring = NULL;

	kfree(pdata->channel_head);

	pdata->channel_head = NULL;
	pdata->channel_count = 0;
}

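/* Allocate the channel array plus one Tx and one Rx ring array, then
 * wire each channel to its DMA register block, its interrupt (when
 * per-channel IRQs are in use) and its ring entries.
 */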
static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel_head, *channel;
	struct xlgmac_ring *tx_ring, *rx_ring;
	int ret = -ENOMEM;
	unsigned int i;

	channel_head = kcalloc(pdata->channel_count,
			       sizeof(struct xlgmac_channel), GFP_KERNEL);
	if (!channel_head)
		return ret;

	netif_dbg(pdata, drv, pdata->netdev,
		  "channel_head=%p\n", channel_head);

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_head; i < pdata->channel_count;
	     i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq) {
			/* Get the per DMA interrupt */
			ret = pdata->channel_irq[i];
			if (ret < 0) {
				netdev_err(pdata->netdev,
					   "get_irq %u failed\n",
					   i + 1);
				goto err_irq;
			}
			channel->dma_irq = ret;
		}

		/* Hand rings out by index so the array base pointers stay
		 * valid for the error paths below.
		 */
		if (i < pdata->tx_ring_count)
			channel->tx_ring = &tx_ring[i];

		if (i < pdata->rx_ring_count)
			channel->rx_ring = &rx_ring[i];

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
			  channel->name, channel->dma_regs,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_head = channel_head;

	return 0;

err_irq:
	kfree(rx_ring);

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_head);

	return ret;
}

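/* Rings must go first: freeing them walks the channel array. */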
static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
{
	xlgmac_free_rings(pdata);

	xlgmac_free_channels(pdata);
}

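/* Channels are allocated first so the rings have channels to attach
 * to; any failure tears both down together.
 */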
static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
{
	int ret;

	ret = xlgmac_alloc_channels(pdata);
	if (ret)
		goto err_alloc;

	ret = xlgmac_alloc_rings(pdata);
	if (ret)
		goto err_alloc;

	return 0;

err_alloc:
	xlgmac_free_channels_and_rings(pdata);

	return ret;
}

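/* Allocate a (possibly compound) page group for Rx buffers, retrying
 * at successively smaller orders until a zero-order allocation also
 * fails, then map it for device-to-memory DMA.
 */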
static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
			      struct xlgmac_page_alloc *pa,
			      gfp_t gfp, int order)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;

	/* Try to obtain pages, decreasing order if necessary */
	gfp |= __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages(gfp, order);
		if (pages)
			break;

		order--;
	}
	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

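/* Carve @len bytes out of the current page allocation for one Rx
 * buffer, taking a page reference on behalf of the descriptor. Once
 * another buffer of the same size would no longer fit, the descriptor
 * inherits the final unmap/put via pa_unmap and the allocation is
 * cleared so a fresh one is obtained next time.
 */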
static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
				   struct xlgmac_page_alloc *pa,
				   unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

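/* Attach header and data buffer pages to one Rx descriptor,
 * replenishing the ring's page allocations when they are exhausted.
 * GFP_ATOMIC is used since this may run in softirq context.
 */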
static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
				struct xlgmac_ring *ring,
				struct xlgmac_desc_data *desc_data)
{
	int order, ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
					 GFP_ATOMIC, 0);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
		ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
					 GFP_ATOMIC, order);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
			       XLGMAC_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
			       pdata->rx_buf_size);

	return 0;
}

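/* Point every Tx descriptor data entry at its hardware descriptor and
 * DMA address, reset the ring state, and let the hardware ops program
 * the channel's Tx descriptor registers.
 */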
static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	dma_addr_t dma_desc_addr;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		dma_desc = ring->dma_desc_head;
		dma_desc_addr = ring->dma_desc_head_addr;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);

			desc_data->dma_desc = dma_desc;
			desc_data->dma_desc_addr = dma_desc_addr;

			dma_desc++;
			dma_desc_addr += sizeof(struct xlgmac_dma_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_ops->tx_desc_init(channel);
	}
}

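/* Same walk as the Tx init, except each Rx descriptor also gets
 * header and data buffers mapped before the hardware ops program the
 * channel's Rx descriptor registers.
 */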
static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	dma_addr_t dma_desc_addr;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		dma_desc = ring->dma_desc_head;
		dma_desc_addr = ring->dma_desc_head_addr;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);

			desc_data->dma_desc = dma_desc;
			desc_data->dma_desc_addr = dma_desc_addr;

			if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
				break;

			dma_desc++;
			dma_desc_addr += sizeof(struct xlgmac_dma_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_ops->rx_desc_init(channel);
	}
}

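/* DMA-map an skb for transmit: optionally reserve a slot for a
 * context descriptor (MSS or VLAN tag change), map the TSO header on
 * its own, then map the linear data and every page fragment in chunks
 * of at most XLGMAC_TX_MAX_BUF_SIZE. Returns the number of descriptor
 * entries used, or 0 after unwinding all mappings on error.
 */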
static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
			     struct sk_buff *skb)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	unsigned int start_index, cur_index;
	struct xlgmac_desc_data *desc_data;
	unsigned int offset, datalen, len;
	struct xlgmac_pkt_info *pkt_info;
	skb_frag_t *frag;
	unsigned int tso, vlan;
	dma_addr_t skb_dma;
	unsigned int i;

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	pkt_info = &ring->pkt_info;
	pkt_info->desc_count = 0;
	pkt_info->length = 0;

	tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
				  TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				  TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
	vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
				   TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
				   TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);

	/* Save space for a context descriptor if needed */
	if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
	    (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 pkt_info->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		desc_data->skb_dma = skb_dma;
		desc_data->skb_dma_len = pkt_info->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, pkt_info->header_len);

		offset = pkt_info->header_len;

		pkt_info->length += pkt_info->header_len;

		cur_index++;
		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		desc_data->skb_dma = skb_dma;
		desc_data->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		pkt_info->length += len;

		cur_index++;
		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XLGMAC_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			desc_data->skb_dma = skb_dma;
			desc_data->skb_dma_len = len;
			desc_data->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			pkt_info->length += len;

			cur_index++;
			desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so desc_data is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
	desc_data->skb = skb;

	/* Save the number of descriptor entries used */
	pkt_info->desc_count = cur_index - start_index;

	return pkt_info->desc_count;

err_out:
	while (start_index < cur_index) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
		xlgmac_unmap_desc_data(pdata, desc_data);
	}

	return 0;
}

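/* Install the descriptor operations used by the rest of the driver. */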
void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
{
	desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
	desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
	desc_ops->map_tx_skb = xlgmac_map_tx_skb;
	desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
	desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
	desc_ops->tx_desc_init = xlgmac_tx_desc_init;
	desc_ops->rx_desc_init = xlgmac_rx_desc_init;
}