// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
/**
 * struct hl_eqe_work - used to schedule the deferred handling of an EQ
 *                      entry or a cpucp_reset event
 *
 * @eq_work: workqueue object to run when EQ entry is received
 * @hdev: pointer to device structure
 * @eq_entry: copy of the EQ entry
 */
struct hl_eqe_work {
	struct work_struct	eq_work;
	struct hl_device	*hdev;
	struct hl_eq_entry	eq_entry;
};

/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0.
 */
inline u32 hl_cq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_CQ_LENGTH))
		ptr = 0;
	return ptr;
}

/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0.
 */
inline u32 hl_eq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_EQ_LENGTH))
		ptr = 0;
	return ptr;
}

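/**
 * irq_handle_eqe - handle a copied EQ entry outside of interrupt context
 *
 * @work: pointer to the &struct work_struct embedded in &struct hl_eqe_work
 *
 * Dispatches the copied EQ entry to the ASIC-specific handler and frees
 * the work item that carried it.
 */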
static void irq_handle_eqe(struct work_struct *work)
{
	struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
							eq_work);
	struct hl_device *hdev = eqe_work->hdev;

	hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

	kfree(eqe_work);
}

/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 */
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
	struct hl_cq *cq = arg;
	struct hl_device *hdev = cq->hdev;
	struct hl_hw_queue *queue;
	struct hl_cs_job *job;
	bool shadow_index_valid;
	u16 shadow_index;
	struct hl_cq_entry *cq_entry, *cq_base;

	if (hdev->disabled) {
		dev_dbg(hdev->dev,
			"Device disabled but received IRQ %d for CQ %d\n",
			irq, cq->hw_queue_id);
		return IRQ_HANDLED;
	}

	cq_base = cq->kernel_address;

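	/* Drain every entry whose ready (ownership) bit the device has set */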
	while (1) {
		bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
					CQ_ENTRY_READY_MASK)
						>> CQ_ENTRY_READY_SHIFT);

		if (!entry_ready)
			break;

		cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

		/* Make sure we read CQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);

		shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_SHIFT);

		queue = &hdev->kernel_queues[cq->hw_queue_id];

		if ((shadow_index_valid) && (!hdev->disabled)) {
			job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
			queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
		}

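		/*
		 * The queue CI advances for every completion entry, even when
		 * no job work was scheduled for it.
		 */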
		atomic_inc(&queue->ci);

		/* Clear CQ entry ready bit */
		cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
						~CQ_ENTRY_READY_MASK);

		cq->ci = hl_cq_inc_ptr(cq->ci);

		/* Increment free slots */
		atomic_inc(&cq->free_slots_cnt);
	}

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
	struct hl_eq *eq = arg;
	struct hl_device *hdev = eq->hdev;
	struct hl_eq_entry *eq_entry;
	struct hl_eq_entry *eq_base;
	struct hl_eqe_work *handle_eqe_work;

	eq_base = eq->kernel_address;

	while (1) {
		bool entry_ready =
			((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
				EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);

		if (!entry_ready)
			break;

		eq_entry = &eq_base[eq->ci];

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		if (hdev->disabled) {
			dev_warn(hdev->dev,
				"Device disabled but received IRQ %d for EQ\n",
				irq);
			goto skip_irq;
		}

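		/*
		 * GFP_ATOMIC because we are in hard interrupt context. If the
		 * allocation fails, the event is dropped, but the entry is
		 * still consumed below so the queue keeps advancing.
		 */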
		handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
		if (handle_eqe_work) {
			INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
			handle_eqe_work->hdev = hdev;

			memcpy(&handle_eqe_work->eq_entry, eq_entry,
					sizeof(*eq_entry));

			queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
		}
skip_irq:
		/* Clear EQ entry ready bit */
		eq_entry->hdr.ctl =
			cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
							~EQ_CTL_READY_MASK);

		eq->ci = hl_eq_inc_ptr(eq->ci);

		hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
	}

	return IRQ_HANDLED;
}

/**
 * hl_cq_init - main initialization function for a CQ object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: The H/W queue ID this completion queue belongs to
 *
 * Allocate dma-able memory for the completion queue and initialize fields.
 * Returns 0 on success.
 */
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->hw_queue_id = hw_queue_id;
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	return 0;
}

/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory.
 */
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
	hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
						 q->kernel_address,
						 q->bus_address);
}

void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queue so we won't process old entries
	 * when the device is operational again.
	 */

	memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}

/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields.
 * Returns 0 on success.
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
	void *p;

	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_EQ_SIZE_IN_BYTES,
							&q->bus_address);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->ci = 0;

	return 0;
}

/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory.
 */
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
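	/* Let all pending EQ work finish before releasing the queue buffer */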
	flush_workqueue(hdev->eq_wq);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
						       HL_EQ_SIZE_IN_BYTES,
						       q->kernel_address);
}

void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
	q->ci = 0;

	/*
	 * It's not enough to just reset the CI because the H/W may have
	 * written valid event entries before it was halted and therefore
	 * we need to clean the actual queue so we won't process old entries
	 * when the device is operational again.
	 */

	memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}