/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

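/* Rough sketch of the expected call sequence, inferred from the functions
 * below rather than documented anywhere in this file (buf_clean_fn stands
 * for a caller-supplied callback, not a symbol defined here):
 *
 *	vnic_rq_alloc(vdev, &rq, index, desc_count, desc_size);
 *	vnic_rq_init(&rq, cq_index, err_intr_enable, err_intr_offset);
 *	vnic_rq_enable(&rq);
 *	... post and service receive buffers ...
 *	vnic_rq_disable(&rq);
 *	vnic_rq_clean(&rq, buf_clean_fn);
 *	vnic_rq_free(&rq);
 */
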
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_rq.h"

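/* Allocate the rq->bufs[] bookkeeping blocks (GFP_ATOMIC) and point every
 * vnic_rq_buf at its descriptor in the ring.  The entries are chained
 * through ->next, with the last descriptor linking back to rq->bufs[0], so
 * the posting and cleaning paths can walk a circular list instead of doing
 * index arithmetic.
 */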
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
	struct vnic_rq_buf *buf;
	unsigned int i, j, count = rq->ring.desc_count;
	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);

	for (i = 0; i < blks; i++) {
		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!rq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc rq_bufs\n");
			return -ENOMEM;
		}
	}

	for (i = 0; i < blks; i++) {
		buf = rq->bufs[i];
		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)rq->ring.descs +
				rq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				buf->next = rq->bufs[0];
				break;
			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
				buf->next = rq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	rq->to_use = rq->to_clean = rq->bufs[0];
	rq->buf_index = 0;

	return 0;
}

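/* Release the descriptor ring and every rq->bufs[] block.  Safe to call on a
 * partially initialized queue: kfree(NULL) is a no-op, so blocks that were
 * never allocated are simply skipped.
 */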
void vnic_rq_free(struct vnic_rq *rq)
{
	struct vnic_dev *vdev;
	unsigned int i;

	vdev = rq->vdev;

	vnic_dev_free_desc_ring(vdev, &rq->ring);

	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
		kfree(rq->bufs[i]);
		rq->bufs[i] = NULL;
	}

	rq->ctrl = NULL;
}

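/* Look up the control registers for RQ[index], make sure the queue is
 * disabled, then allocate the descriptor ring and the buffer bookkeeping.
 * If buffer allocation fails, the ring is released again via vnic_rq_free()
 * before returning the error.
 */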
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	rq->index = index;
	rq->vdev = vdev;

	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
	if (!rq->ctrl) {
		printk(KERN_ERR "Failed to hook RQ[%u] resource\n", index);
		return -EINVAL;
	}

	vnic_rq_disable(rq);

	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
	if (err)
		return err;

	err = vnic_rq_alloc_bufs(rq);
	if (err) {
		vnic_rq_free(rq);
		return err;
	}

	return 0;
}

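/* Program the RQ control registers: ring base and size, completion queue
 * binding, error interrupt settings, and cleared drop/error counters.  The
 * software to_use/to_clean pointers are resynchronized to the hardware
 * fetch_index, and posted_index is set equal to fetch_index, i.e. no receive
 * buffers are considered posted yet.
 */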
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	u32 fetch_index;

	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &rq->ctrl->ring_base);
	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;
}

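/* Read back the hardware error status register for this RQ. */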
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
	return ioread32(&rq->ctrl->error_status);
}

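/* Write 1 to the enable register; the hardware is then expected to start
 * servicing this RQ.
 */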
void vnic_rq_enable(struct vnic_rq *rq)
{
	iowrite32(1, &rq->ctrl->enable);
}

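/* Clear the enable register and poll the running flag, waiting up to roughly
 * 100 us (100 iterations of udelay(1)) for the hardware to acknowledge the
 * disable.  Returns -ETIMEDOUT if the queue never reports itself stopped.
 */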
int vnic_rq_disable(struct vnic_rq *rq)
{
	unsigned int wait;

	iowrite32(0, &rq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 100; wait++) {
		if (!(ioread32(&rq->ctrl->running)))
			return 0;
		udelay(1);
	}

	printk(KERN_ERR "Failed to disable RQ[%u]\n", rq->index);

	return -ETIMEDOUT;
}

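/* Drain a disabled RQ: invoke the caller-supplied buf_clean() on every
 * outstanding buffer, then resynchronize to_use/to_clean and posted_index
 * with the hardware fetch_index and wipe the descriptor ring contents.
 * Warns if the queue is still enabled when called.
 */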
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;

	WARN_ON(ioread32(&rq->ctrl->enable));

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {
		(*buf_clean)(rq, buf);

		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;

	vnic_dev_clear_desc_ring(&rq->ring);
}