^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright 2016-2019 HabanaLabs, Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include "habanalabs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include "../include/hw_ip/mmu/mmu_general.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #define MMU_ADDR_BUF_SIZE 40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #define MMU_ASID_BUF_SIZE 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) static struct dentry *hl_debug_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) u8 i2c_reg, long *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) struct cpucp_packet pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) if (hl_device_disabled_or_in_reset(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) memset(&pkt, 0, sizeof(pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) CPUCP_PKT_CTL_OPCODE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) pkt.i2c_bus = i2c_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) pkt.i2c_addr = i2c_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) pkt.i2c_reg = i2c_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) 0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) u8 i2c_reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct cpucp_packet pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) if (hl_device_disabled_or_in_reset(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) memset(&pkt, 0, sizeof(pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) CPUCP_PKT_CTL_OPCODE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) pkt.i2c_bus = i2c_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) pkt.i2c_addr = i2c_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) pkt.i2c_reg = i2c_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) pkt.value = cpu_to_le64(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) struct cpucp_packet pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) if (hl_device_disabled_or_in_reset(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) memset(&pkt, 0, sizeof(pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) CPUCP_PKT_CTL_OPCODE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) pkt.led_index = cpu_to_le32(led);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) pkt.value = cpu_to_le64(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) static int command_buffers_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) struct hl_debugfs_entry *entry = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct hl_cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) bool first = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) spin_lock(&dev_entry->cb_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if (first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) seq_puts(s, " CB ID CTX ID CB size CB RefCnt mmap? CS counter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) seq_puts(s, "---------------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) seq_printf(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) " %03llu %d 0x%08x %d %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) cb->id, cb->ctx->asid, cb->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) kref_read(&cb->refcount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) cb->mmap, cb->cs_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) spin_unlock(&dev_entry->cb_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) static int command_submission_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct hl_debugfs_entry *entry = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) struct hl_cs *cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) bool first = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) spin_lock(&dev_entry->cs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) if (first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) seq_puts(s, " CS ID CTX ASID CS RefCnt Submitted Completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) seq_puts(s, "------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) seq_printf(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) " %llu %d %d %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) cs->sequence, cs->ctx->asid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) kref_read(&cs->refcount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) cs->submitted, cs->completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) spin_unlock(&dev_entry->cs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) static int command_submission_jobs_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) struct hl_debugfs_entry *entry = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct hl_cs_job *job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) bool first = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) spin_lock(&dev_entry->cs_job_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) if (first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) seq_puts(s, " JOB ID CS ID CTX ASID H/W Queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) seq_puts(s, "---------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (job->cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) seq_printf(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) " %02d %llu %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) job->id, job->cs->sequence, job->cs->ctx->asid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) job->hw_queue_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) seq_printf(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) " %02d 0 %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) job->id, HL_KERNEL_ASID_ID, job->hw_queue_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) spin_unlock(&dev_entry->cs_job_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) static int userptr_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) struct hl_debugfs_entry *entry = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) struct hl_userptr *userptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) "DMA_FROM_DEVICE", "DMA_NONE"};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) bool first = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) spin_lock(&dev_entry->userptr_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) if (first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) seq_puts(s, " user virtual address size dma dir\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) seq_puts(s, "----------------------------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) seq_printf(s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) " 0x%-14llx %-10u %-30s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) userptr->addr, userptr->size, dma_dir[userptr->dir]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) spin_unlock(&dev_entry->userptr_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) seq_puts(s, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
/*
 * debugfs show: dump the virtual-memory state of every registered context.
 *
 * For each context this prints (a) its per-context mem_hash mappings and
 * (b) every physical page-pack allocation in the device-wide IDR that
 * belongs to the context's ASID.
 *
 * Lock ordering here is ctx_mem_hash_spinlock -> mem_hash_lock and
 * ctx_mem_hash_spinlock -> vm->idr_lock; keep it that way.
 */
static int vm_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_ctx *ctx;
	struct hl_vm *vm;
	struct hl_vm_hash_node *hnode;
	struct hl_userptr *userptr;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	enum vm_type_t *vm_type;
	bool once = true;	/* stays true if no context was printed */
	u64 j;
	int i;			/* reused: hash bucket index, then IDR id */

	/* Nothing meaningful to show when the MMU is disabled */
	if (!dev_entry->hdev->mmu_enable)
		return 0;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);

	list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
		once = false;
		seq_puts(s, "\n\n----------------------------------------------------");
		seq_puts(s, "\n----------------------------------------------------\n\n");
		seq_printf(s, "ctx asid: %u\n", ctx->asid);

		seq_puts(s, "\nmappings:\n\n");
		seq_puts(s, " virtual address size handle\n");
		seq_puts(s, "----------------------------------------------------\n");
		mutex_lock(&ctx->mem_hash_lock);
		hash_for_each(ctx->mem_hash, i, hnode, node) {
			/* Each node starts with its vm_type_t discriminator */
			vm_type = hnode->ptr;

			if (*vm_type == VM_TYPE_USERPTR) {
				userptr = hnode->ptr;
				/* Userptr mappings have no handle column */
				seq_printf(s,
					" 0x%-14llx %-10u\n",
					hnode->vaddr, userptr->size);
			} else {
				phys_pg_pack = hnode->ptr;
				seq_printf(s,
					" 0x%-14llx %-10llu %-4u\n",
					hnode->vaddr, phys_pg_pack->total_size,
					phys_pg_pack->handle);
			}
		}
		mutex_unlock(&ctx->mem_hash_lock);

		vm = &ctx->hdev->vm;
		spin_lock(&vm->idr_lock);

		if (!idr_is_empty(&vm->phys_pg_pack_handles))
			seq_puts(s, "\n\nallocations:\n");

		/* The IDR is device-wide; filter on this context's ASID */
		idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
			if (phys_pg_pack->asid != ctx->asid)
				continue;

			seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
			seq_printf(s, "page size: %u\n\n",
						phys_pg_pack->page_size);
			seq_puts(s, " physical address\n");
			seq_puts(s, "---------------------\n");
			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
				seq_printf(s, " 0x%-14llx\n",
						phys_pg_pack->pages[j]);
			}
		}
		spin_unlock(&vm->idr_lock);

	}

	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);

	if (!once)
		seq_puts(s, "\n");

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) /* these inline functions are copied from mmu.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) static inline u64 get_hop0_addr(struct hl_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) return ctx->hdev->asic_prop.mmu_pgt_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) u64 virt_addr, u64 mask, u64 shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) ((virt_addr & mask) >> shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) struct hl_mmu_properties *mmu_specs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) u64 hop_addr, u64 vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) mmu_specs->hop0_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) struct hl_mmu_properties *mmu_specs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) u64 hop_addr, u64 vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) mmu_specs->hop1_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) struct hl_mmu_properties *mmu_specs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) u64 hop_addr, u64 vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) mmu_specs->hop2_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) struct hl_mmu_properties *mmu_specs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) u64 hop_addr, u64 vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) mmu_specs->hop3_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) struct hl_mmu_properties *mmu_specs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) u64 hop_addr, u64 vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) mmu_specs->hop4_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) static inline u64 get_hop5_pte_addr(struct hl_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) struct hl_mmu_properties *mmu_specs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) u64 hop_addr, u64 vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop5_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) mmu_specs->hop5_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static inline u64 get_next_hop_addr(u64 curr_pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) if (curr_pte & PAGE_PRESENT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) return curr_pte & HOP_PHYS_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) return ULLONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) static int mmu_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) struct hl_debugfs_entry *entry = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) struct hl_device *hdev = dev_entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) struct asic_fixed_properties *prop = &hdev->asic_prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) struct hl_mmu_properties *mmu_prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) struct hl_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) bool is_dram_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) hop5_addr = 0, hop5_pte_addr = 0, hop5_pte = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) virt_addr = dev_entry->mmu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) if (!hdev->mmu_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) ctx = hdev->kernel_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) ctx = hdev->compute_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) if (!ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) dev_err(hdev->dev, "no ctx available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) prop->dmmu.start_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) prop->dmmu.end_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) /* shifts and masks are the same in PMMU and HPMMU, use one of them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) mutex_lock(&ctx->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) /* the following lookup is copied from unmap() in mmu.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) hop0_addr = get_hop0_addr(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) hop1_addr = get_next_hop_addr(hop0_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) if (hop1_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) hop2_addr = get_next_hop_addr(hop1_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) if (hop2_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) hop3_addr = get_next_hop_addr(hop2_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (hop3_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (!(hop3_pte & LAST_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) hop4_addr = get_next_hop_addr(hop3_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) if (hop4_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) hop4_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) hop4_pte = hdev->asic_funcs->read_pte(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) hop4_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (!(hop4_pte & PAGE_PRESENT_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (!(hop3_pte & PAGE_PRESENT_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) hop4_addr = get_next_hop_addr(hop3_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (hop4_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) hop4_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) hop4_pte = hdev->asic_funcs->read_pte(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) hop4_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (!(hop4_pte & LAST_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) hop5_addr = get_next_hop_addr(hop4_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) if (hop5_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) hop5_pte_addr = get_hop5_pte_addr(ctx, mmu_prop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) hop5_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) hop5_pte = hdev->asic_funcs->read_pte(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) hop5_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (!(hop5_pte & PAGE_PRESENT_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (!(hop4_pte & PAGE_PRESENT_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) dev_entry->mmu_asid, dev_entry->mmu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) if (!(hop3_pte & LAST_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (!(hop4_pte & LAST_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) seq_printf(s, "hop5_addr: 0x%llx\n", hop5_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) seq_printf(s, "hop5_pte_addr: 0x%llx\n", hop5_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) seq_printf(s, "hop5_pte: 0x%llx\n", hop5_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) not_mapped:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) mutex_unlock(&ctx->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) size_t count, loff_t *f_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) struct seq_file *s = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) struct hl_debugfs_entry *entry = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) struct hl_device *hdev = dev_entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) char kbuf[MMU_KBUF_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) char *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) if (!hdev->mmu_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (count > sizeof(kbuf) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (copy_from_user(kbuf, buf, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) kbuf[count] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) c = strchr(kbuf, ' ');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) *c = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (strncmp(c+1, "0x", 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) static int engines_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) struct hl_debugfs_entry *entry = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) struct hl_device *hdev = dev_entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (atomic_read(&hdev->in_reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) dev_warn_ratelimited(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) "Can't check device idle during reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) hdev->asic_funcs->is_device_idle(hdev, NULL, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) struct asic_fixed_properties *prop = &hdev->asic_prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (!hdev->mmu_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (hdev->dram_supports_virtual_memory &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (addr >= prop->pmmu.start_addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) addr < prop->pmmu.end_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) if (addr >= prop->pmmu_huge.start_addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) addr < prop->pmmu_huge.end_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) u64 *phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) struct hl_ctx *ctx = hdev->compute_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) struct asic_fixed_properties *prop = &hdev->asic_prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) struct hl_mmu_properties *mmu_prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) u64 hop_addr, hop_pte_addr, hop_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) u64 offset_mask = HOP4_MASK | FLAGS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) bool is_dram_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (!ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) dev_err(hdev->dev, "no ctx available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) prop->dmmu.start_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) prop->dmmu.end_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /* shifts and masks are the same in PMMU and HPMMU, use one of them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) mutex_lock(&ctx->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) /* hop 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) hop_addr = get_hop0_addr(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) /* hop 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) hop_addr = get_next_hop_addr(hop_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (hop_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /* hop 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) hop_addr = get_next_hop_addr(hop_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) if (hop_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /* hop 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) hop_addr = get_next_hop_addr(hop_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (hop_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (!(hop_pte & LAST_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) /* hop 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) hop_addr = get_next_hop_addr(hop_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (hop_addr == ULLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) offset_mask = FLAGS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) if (!(hop_pte & PAGE_PRESENT_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) goto not_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) *phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) not_mapped:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) mutex_unlock(&ctx->mmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) static ssize_t hl_data_read32(struct file *f, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) char tmp_buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) u64 addr = entry->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (atomic_read(&hdev->in_reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (*ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (hl_is_device_va(hdev, addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) rc = device_va_to_pa(hdev, addr, &addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) sprintf(tmp_buf, "0x%08x\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return simple_read_from_buffer(buf, count, ppos, tmp_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) strlen(tmp_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static ssize_t hl_data_write32(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) u64 addr = entry->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (atomic_read(&hdev->in_reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) rc = kstrtouint_from_user(buf, count, 16, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (hl_is_device_va(hdev, addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) rc = device_va_to_pa(hdev, addr, &addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static ssize_t hl_data_read64(struct file *f, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) char tmp_buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) u64 addr = entry->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (*ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (hl_is_device_va(hdev, addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) rc = device_va_to_pa(hdev, addr, &addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) sprintf(tmp_buf, "0x%016llx\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return simple_read_from_buffer(buf, count, ppos, tmp_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) strlen(tmp_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static ssize_t hl_data_write64(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) u64 addr = entry->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) rc = kstrtoull_from_user(buf, count, 16, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (hl_is_device_va(hdev, addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) rc = device_va_to_pa(hdev, addr, &addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static ssize_t hl_get_power_state(struct file *f, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) char tmp_buf[200];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (*ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (hdev->pdev->current_state == PCI_D0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) else if (hdev->pdev->current_state == PCI_D3hot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) i = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) i = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) sprintf(tmp_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return simple_read_from_buffer(buf, count, ppos, tmp_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) strlen(tmp_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) rc = kstrtouint_from_user(buf, count, 10, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (value == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) pci_set_power_state(hdev->pdev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) pci_restore_state(hdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) rc = pci_enable_device(hdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) } else if (value == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) pci_save_state(hdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) pci_disable_device(hdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) pci_set_power_state(hdev->pdev, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) dev_dbg(hdev->dev, "invalid power state value %u\n", value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) char tmp_buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (*ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) entry->i2c_reg, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dev_err(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) "Failed to read from I2C bus %d, addr %d, reg %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) sprintf(tmp_buf, "0x%02lx\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) strlen(tmp_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) rc = kstrtouint_from_user(buf, count, 16, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) entry->i2c_reg, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) dev_err(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static ssize_t hl_led0_write(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) rc = kstrtouint_from_user(buf, count, 10, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) value = value ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) hl_debugfs_led_set(hdev, 0, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static ssize_t hl_led1_write(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) rc = kstrtouint_from_user(buf, count, 10, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) value = value ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) hl_debugfs_led_set(hdev, 1, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static ssize_t hl_led2_write(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) rc = kstrtouint_from_user(buf, count, 10, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) value = value ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) hl_debugfs_led_set(hdev, 2, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static ssize_t hl_device_read(struct file *f, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static const char *help =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) "Valid values: disable, enable, suspend, resume, cpu_timeout\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static ssize_t hl_device_write(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) char data[30] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* don't allow partial writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (*ppos != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) simple_write_to_buffer(data, 29, ppos, buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (strncmp("disable", data, strlen("disable")) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) hdev->disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) } else if (strncmp("enable", data, strlen("enable")) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) hdev->disabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) } else if (strncmp("suspend", data, strlen("suspend")) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) hdev->asic_funcs->suspend(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) } else if (strncmp("resume", data, strlen("resume")) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) hdev->asic_funcs->resume(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) } else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) hdev->device_cpu_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) dev_err(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) count = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) char tmp_buf[200];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (*ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) strlen(tmp_buf) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (atomic_read(&hdev->in_reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dev_warn_ratelimited(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) "Can't change clock gating during reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) rc = kstrtoull_from_user(buf, count, 16, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) hdev->clock_gating_mask = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) hdev->asic_funcs->set_clock_gating(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) char tmp_buf[200];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (*ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) strlen(tmp_buf) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct hl_device *hdev = entry->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (atomic_read(&hdev->in_reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) dev_warn_ratelimited(hdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) "Can't change stop on error during reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) rc = kstrtouint_from_user(buf, count, 10, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) hdev->stop_on_err = value ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) hl_device_reset(hdev, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) static const struct file_operations hl_data32b_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) .read = hl_data_read32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) .write = hl_data_write32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static const struct file_operations hl_data64b_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) .read = hl_data_read64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .write = hl_data_write64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static const struct file_operations hl_i2c_data_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) .read = hl_i2c_data_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) .write = hl_i2c_data_write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static const struct file_operations hl_power_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) .read = hl_get_power_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) .write = hl_set_power_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static const struct file_operations hl_led0_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .write = hl_led0_write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static const struct file_operations hl_led1_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) .write = hl_led1_write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static const struct file_operations hl_led2_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) .write = hl_led2_write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static const struct file_operations hl_device_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .read = hl_device_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .write = hl_device_write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) static const struct file_operations hl_clk_gate_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) .read = hl_clk_gate_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) .write = hl_clk_gate_write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static const struct file_operations hl_stop_on_err_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) .read = hl_stop_on_err_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) .write = hl_stop_on_err_write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static const struct hl_info_list hl_debugfs_list[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {"command_buffers", command_buffers_show, NULL},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {"command_submission", command_submission_show, NULL},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {"command_submission_jobs", command_submission_jobs_show, NULL},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {"userptr", userptr_show, NULL},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {"vm", vm_show, NULL},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {"mmu", mmu_show, mmu_asid_va_write},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {"engines", engines_show, NULL}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static int hl_debugfs_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct hl_debugfs_entry *node = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return single_open(file, node->info_ent->show, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) size_t count, loff_t *f_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) struct hl_debugfs_entry *node = file->f_inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (node->info_ent->write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return node->info_ent->write(file, buf, count, f_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static const struct file_operations hl_debugfs_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) .open = hl_debugfs_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) .write = hl_debugfs_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) .release = single_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) void hl_debugfs_add_device(struct hl_device *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) int count = ARRAY_SIZE(hl_debugfs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct hl_debugfs_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct dentry *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) dev_entry->hdev = hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) dev_entry->entry_arr = kmalloc_array(count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) sizeof(struct hl_debugfs_entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (!dev_entry->entry_arr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) INIT_LIST_HEAD(&dev_entry->file_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) INIT_LIST_HEAD(&dev_entry->cb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) INIT_LIST_HEAD(&dev_entry->cs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) INIT_LIST_HEAD(&dev_entry->cs_job_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) INIT_LIST_HEAD(&dev_entry->userptr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) mutex_init(&dev_entry->file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) spin_lock_init(&dev_entry->cb_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) spin_lock_init(&dev_entry->cs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) spin_lock_init(&dev_entry->cs_job_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) spin_lock_init(&dev_entry->userptr_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) hl_debug_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) debugfs_create_x64("addr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) &dev_entry->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) debugfs_create_file("data32",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) &hl_data32b_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) debugfs_create_file("data64",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) &hl_data64b_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) debugfs_create_file("set_power_state",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 0200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) &hl_power_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) debugfs_create_u8("i2c_bus",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) &dev_entry->i2c_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) debugfs_create_u8("i2c_addr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) &dev_entry->i2c_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) debugfs_create_u8("i2c_reg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) &dev_entry->i2c_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) debugfs_create_file("i2c_data",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) &hl_i2c_data_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) debugfs_create_file("led0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 0200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) &hl_led0_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) debugfs_create_file("led1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 0200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) &hl_led1_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) debugfs_create_file("led2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 0200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) &hl_led2_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) debugfs_create_file("device",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 0200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) &hl_device_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) debugfs_create_file("clk_gate",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 0200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) &hl_clk_gate_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) debugfs_create_file("stop_on_err",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) dev_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) &hl_stop_on_err_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) ent = debugfs_create_file(hl_debugfs_list[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) dev_entry->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) &hl_debugfs_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) entry->dent = ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) entry->info_ent = &hl_debugfs_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) entry->dev_entry = dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) void hl_debugfs_remove_device(struct hl_device *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) debugfs_remove_recursive(entry->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) mutex_destroy(&entry->file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) kfree(entry->entry_arr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) void hl_debugfs_add_file(struct hl_fpriv *hpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) mutex_lock(&dev_entry->file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) list_add(&hpriv->debugfs_list, &dev_entry->file_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) mutex_unlock(&dev_entry->file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) mutex_lock(&dev_entry->file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) list_del(&hpriv->debugfs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) mutex_unlock(&dev_entry->file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) void hl_debugfs_add_cb(struct hl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) spin_lock(&dev_entry->cb_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) list_add(&cb->debugfs_list, &dev_entry->cb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) spin_unlock(&dev_entry->cb_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) void hl_debugfs_remove_cb(struct hl_cb *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) spin_lock(&dev_entry->cb_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) list_del(&cb->debugfs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) spin_unlock(&dev_entry->cb_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) void hl_debugfs_add_cs(struct hl_cs *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) spin_lock(&dev_entry->cs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) list_add(&cs->debugfs_list, &dev_entry->cs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) spin_unlock(&dev_entry->cs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) void hl_debugfs_remove_cs(struct hl_cs *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) spin_lock(&dev_entry->cs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) list_del(&cs->debugfs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) spin_unlock(&dev_entry->cs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) spin_lock(&dev_entry->cs_job_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) list_add(&job->debugfs_list, &dev_entry->cs_job_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) spin_unlock(&dev_entry->cs_job_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) spin_lock(&dev_entry->cs_job_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) list_del(&job->debugfs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) spin_unlock(&dev_entry->cs_job_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) spin_lock(&dev_entry->userptr_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) spin_unlock(&dev_entry->userptr_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) void hl_debugfs_remove_userptr(struct hl_device *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct hl_userptr *userptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) spin_lock(&dev_entry->userptr_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) list_del(&userptr->debugfs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) spin_unlock(&dev_entry->userptr_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) spin_lock(&dev_entry->ctx_mem_hash_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) spin_lock(&dev_entry->ctx_mem_hash_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) list_del(&ctx->debugfs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) void __init hl_debugfs_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) hl_debug_root = debugfs_create_dir("habanalabs", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) void hl_debugfs_fini(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) debugfs_remove_recursive(hl_debug_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }