// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static bool use_dma_mrpc = true;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
		 "Enable the use of the DMA MRPC feature");

static int nirqs = 32;
module_param(nirqs, int, 0644);
MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);

enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};
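
/*
 * Each open file descriptor owns one MRPC context that moves through a
 * simple state machine, guarded by stdev->mrpc_mutex:
 *
 *   IDLE    -> QUEUED  on write(): the command is placed on stdev->mrpc_queue
 *   QUEUED  -> RUNNING when the command reaches the head of the queue and is
 *                      written to the hardware (mrpc_cmd_submit)
 *   RUNNING -> DONE    when the firmware reports a status other than
 *                      INPROGRESS (mrpc_complete_cmd)
 *   DONE    -> IDLE    once the cached result has been copied out by read()
 */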

struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	wait_queue_head_t cmd_comp;
	struct kref kref;
	struct list_head list;

	bool cmd_done;
	u32 cmd;
	u32 status;
	u32 return_code;
	size_t data_len;
	size_t read_len;
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;
};

static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_waitqueue_head(&stuser->cmd_comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}

static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void flush_wc_buf(struct switchtec_dev *stdev)
{
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/*
	 * odb (outbound doorbell) register is processed by low latency
	 * hardware and w/o side effect
	 */
	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	ioread32(&mmio_dbmsg->odb);
}
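
/*
 * flush_wc_buf() is used by mrpc_cmd_submit() below: the dummy MMIO read
 * forces any posted or write-combined writes of the MRPC input payload to
 * reach the device before the command register write that consumes them.
 */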

static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
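
/*
 * Submission sequence (one command in flight at a time, serialised by
 * mrpc_busy): copy the input payload into the input buffer, flush it, then
 * write the command word, which starts firmware execution. Completion is
 * detected either by mrpc_event_work() or, as a fallback, by the 500 ms
 * mrpc_timeout poll scheduled here.
 */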

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	stuser->cmd_done = false;
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
		       stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
out:
	stuser->cmd_done = true;
	wake_up_interruptible(&stuser->cmd_comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
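
/*
 * On completion the result (status, return code and output payload) is
 * cached in the stuser so read() can copy it out later without touching the
 * hardware again; the waiter is then woken, the command is dropped from the
 * queue and the next queued command, if any, is submitted.
 */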

static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	if (stdev->dma_mrpc)
		status = stdev->dma_mrpc->status;
	else
		status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);
out:
	mutex_unlock(&stdev->mrpc_mutex);
}
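
/*
 * The timeout worker is a safety net in case the completion event is never
 * delivered: while the firmware still reports INPROGRESS it simply re-arms
 * itself every 500 ms; otherwise it completes the command exactly as
 * mrpc_event_work() would.
 */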

static ssize_t device_version_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);

static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}

#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
	if (stdev->gen == SWITCHTEC_GEN3) \
		return io_string_show(buf, &si->gen3.field, \
				      sizeof(si->gen3.field)); \
	else if (stdev->gen == SWITCHTEC_GEN4) \
		return io_string_show(buf, &si->gen4.field, \
				      sizeof(si->gen4.field)); \
	else \
		return -ENOTSUPP; \
} \
\
static DEVICE_ATTR_RO(field)
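
/*
 * Rough expansion sketch: DEVICE_ATTR_SYS_INFO_STR(vendor_id) generates
 *
 *	static ssize_t vendor_id_show(struct device *dev,
 *				      struct device_attribute *attr, char *buf)
 *	{
 *		...
 *		if (stdev->gen == SWITCHTEC_GEN3)
 *			return io_string_show(buf, &si->gen3.vendor_id,
 *					      sizeof(si->gen3.vendor_id));
 *		...
 *	}
 *	static DEVICE_ATTR_RO(vendor_id);
 *
 * i.e. one read-only sysfs attribute per fixed-width string field in the
 * generation-specific sys_info register block.
 */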

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);

static ssize_t component_vendor_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;

	/* component_vendor field not supported after gen3 */
	if (stdev->gen != SWITCHTEC_GEN3)
		return sprintf(buf, "none\n");

	return io_string_show(buf, &si->gen3.component_vendor,
			      sizeof(si->gen3.component_vendor));
}
static DEVICE_ATTR_RO(component_vendor);

static ssize_t component_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);

	/* component_id field not supported after gen3 */
	if (stdev->gen != SWITCHTEC_GEN3)
		return sprintf(buf, "none\n");

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);

	/* component_revision field not supported after gen3 */
	if (stdev->gen != SWITCHTEC_GEN3)
		return sprintf(buf, "255\n");

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	stream_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}

static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}
	if (((MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_WRITE) ||
	     (MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_READ)) &&
	    !capable(CAP_SYS_ADMIN)) {
		rc = -EPERM;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
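
/*
 * Wire format of a write to the char device: the first 4 bytes are the
 * 32-bit MRPC command word, the remainder (0 up to
 * SWITCHTEC_MRPC_PAYLOAD_SIZE bytes) is the command's input payload. The
 * GAS read/write commands require CAP_SYS_ADMIN, and a second write before
 * the previous result has been read back fails with -EBADE.
 */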

static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!stuser->cmd_done)
			return -EAGAIN;
	} else {
		rc = wait_event_interruptible(stuser->cmd_comp,
					      stuser->cmd_done);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}
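
/*
 * Illustrative user-space sketch of the protocol implemented by the
 * write()/read() pair above (a sketch only: CMD_ID and the payload sizes
 * are placeholders, not real firmware commands):
 *
 *	struct { uint32_t cmd; uint8_t input[16]; } req = { .cmd = CMD_ID };
 *	struct { uint32_t return_code; uint8_t output[64]; } rsp;
 *
 *	write(fd, &req, sizeof(req));	// queue the command
 *	read(fd, &rsp, sizeof(rsp));	// blocks until DONE, then returns
 *					// return_code + up to 64 output bytes
 *
 * An O_NONBLOCK reader gets -EAGAIN until the reply is ready and can use
 * poll() to wait instead.
 */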

static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->cmd_comp, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->cmd_done)
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}
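
/*
 * Poll semantics: EPOLLIN | EPOLLRDNORM signals that an MRPC reply is ready
 * to be read() back; EPOLLPRI | EPOLLRDBAND signals that the global event
 * counter has advanced past this file handle's event_cnt snapshot. If the
 * device is gone (or the lock cannot be taken) every readiness bit is
 * reported so waiters do not block on a dead device.
 */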

static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	if (stdev->gen == SWITCHTEC_GEN3) {
		info.flash_length = ioread32(&fi->gen3.flash_length);
		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
	} else if (stdev->gen == SWITCHTEC_GEN4) {
		info.flash_length = ioread32(&fi->gen4.flash_length);
		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
	} else {
		return -ENOTSUPP;
	}

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}

static int flash_part_info_gen3(struct switchtec_dev *stdev,
				struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen3 __iomem *fi =
		&stdev->mmio_flash_info->gen3;
	struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
	u32 active_addr = -1;

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info->address == active_addr)
		info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	return 0;
}

static int flash_part_info_gen4(struct switchtec_dev *stdev,
				struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
	struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
	struct active_partition_info_gen4 __iomem *af = &fi->active_flag;

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_MAP_0:
		set_fw_info_part(info, &fi->map0);
		break;
	case SWITCHTEC_IOCTL_PART_MAP_1:
		set_fw_info_part(info, &fi->map1);
		break;
	case SWITCHTEC_IOCTL_PART_KEY_0:
		set_fw_info_part(info, &fi->key0);
		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_KEY_1:
		set_fw_info_part(info, &fi->key1);
		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_0:
		set_fw_info_part(info, &fi->bl2_0);
		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_1:
		set_fw_info_part(info, &fi->bl2_1);
		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG0:
		set_fw_info_part(info, &fi->cfg0);
		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		set_fw_info_part(info, &fi->cfg1);
		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		set_fw_info_part(info, &fi->img0);
		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		set_fw_info_part(info, &fi->img1);
		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
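
/*
 * Roughly, the two flags reported for each partition are derived from
 * different sources: ACTIVE is taken from the flash metadata (gen3: the
 * partition whose address matches active_cfg/active_img; gen4: the
 * active_flag register block), while RUNNING reflects what the firmware
 * reports as currently executing via the *_running fields in sys_info.
 */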
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static int ioctl_flash_part_info(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct switchtec_ioctl_flash_part_info __user *uinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct switchtec_ioctl_flash_part_info info = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (copy_from_user(&info, uinfo, sizeof(info)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (stdev->gen == SWITCHTEC_GEN3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ret = flash_part_info_gen3(stdev, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) } else if (stdev->gen == SWITCHTEC_GEN4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ret = flash_part_info_gen4(stdev, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (copy_to_user(uinfo, &info, sizeof(info)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
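/*
 * Gather the global, partition and PFF event summary registers into a
 * kernel buffer and copy up to @size bytes of it to userspace (the legacy
 * ioctl passes a smaller structure).  The user's event count is synced to
 * the device's so only later events look new.
 */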
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static int ioctl_event_summary(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct switchtec_user *stuser,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct switchtec_ioctl_event_summary __user *usum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct switchtec_ioctl_event_summary *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) s = kzalloc(sizeof(*s), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) s->global = ioread32(&stdev->mmio_sw_event->global_summary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) for (i = 0; i < stdev->partition_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) s->part[i] = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) for (i = 0; i < stdev->pff_csr_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) s->pff[i] = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (copy_to_user(usum, s, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) goto error_case;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) stuser->event_cnt = atomic_read(&stdev->event_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) error_case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) kfree(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
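/*
 * Helpers that translate an offset (and, where relevant, an index) into the
 * address of an event header register in the global, per-partition or
 * per-PFF register blocks.
 */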
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) size_t offset, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return (void __iomem *)stdev->mmio_sw_event + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) size_t offset, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) size_t offset, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) #define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) #define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) #define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
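/*
 * Table mapping each SWITCHTEC_IOCTL_EVENT_* id to the offset of its header
 * register and the helper used to locate it.
 */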
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) static const struct event_reg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) size_t offset, int index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) } event_regs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) twi_mrpc_comp_async_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) cli_mrpc_comp_async_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) intercomm_notify_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
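/*
 * Validate the event id and index and return the address of the
 * corresponding event header register, or an ERR_PTR on bad input.
 */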
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) int event_id, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) size_t off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return (u32 __iomem *)ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) off = event_regs[event_id].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (event_regs[event_id].map_reg == part_ev_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) index = stdev->partition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) else if (index < 0 || index >= stdev->partition_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return (u32 __iomem *)ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) } else if (event_regs[event_id].map_reg == pff_ev_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (index < 0 || index >= stdev->pff_csr_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return (u32 __iomem *)ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return event_regs[event_id].map_reg(stdev, off, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
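/*
 * Read one event header, report its occurred/count state, apply any
 * requested clear/enable/disable flags and return the resulting enable
 * flags to the caller.
 */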
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) static int event_ctl(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct switchtec_ioctl_event_ctl *ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) u32 __iomem *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) u32 hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (IS_ERR(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return PTR_ERR(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) hdr = ioread32(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) ctl->data[i] = ioread32(&reg[i + 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ctl->count = (hdr >> 5) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) hdr &= ~SWITCHTEC_EVENT_CLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) hdr |= SWITCHTEC_EVENT_EN_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) hdr |= SWITCHTEC_EVENT_EN_LOG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) hdr &= ~SWITCHTEC_EVENT_EN_LOG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) hdr |= SWITCHTEC_EVENT_EN_CLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) hdr &= ~SWITCHTEC_EVENT_EN_CLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) hdr |= SWITCHTEC_EVENT_FATAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) hdr &= ~SWITCHTEC_EVENT_FATAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (ctl->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) iowrite32(hdr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ctl->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (hdr & SWITCHTEC_EVENT_EN_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (hdr & SWITCHTEC_EVENT_EN_LOG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (hdr & SWITCHTEC_EVENT_EN_CLI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (hdr & SWITCHTEC_EVENT_FATAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
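/*
 * Handle SWITCHTEC_IOCTL_EVENT_CTL: operate on a single event instance, or
 * on every instance of the event when the index is
 * SWITCHTEC_IOCTL_EVENT_IDX_ALL.
 */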
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static int ioctl_event_ctl(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct switchtec_ioctl_event_ctl __user *uctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) int nr_idxs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) unsigned int event_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct switchtec_ioctl_event_ctl ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (copy_from_user(&ctl, uctl, sizeof(ctl)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (event_regs[ctl.event_id].map_reg == global_ev_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) nr_idxs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) nr_idxs = stdev->partition_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) nr_idxs = stdev->pff_csr_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) event_flags = ctl.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ctl.flags = event_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ret = event_ctl(stdev, &ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) ret = event_ctl(stdev, &ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (copy_to_user(uctl, &ctl, sizeof(ctl)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
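/*
 * Map a PFF instance id back to a (partition, port) pair by scanning the
 * USP, VEP and DSP instance id registers of each partition.  The port is
 * left as -1 if no match is found.
 */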
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static int ioctl_pff_to_port(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct switchtec_ioctl_pff_port __user *up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) int i, part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct part_cfg_regs __iomem *pcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct switchtec_ioctl_pff_port p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (copy_from_user(&p, up, sizeof(p)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) p.port = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) for (part = 0; part < stdev->partition_count; part++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) pcfg = &stdev->mmio_part_cfg_all[part];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) p.partition = part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) reg = ioread32(&pcfg->usp_pff_inst_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (reg == p.pff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) p.port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) reg = ioread32(&pcfg->vep_pff_inst_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (reg == p.pff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) p.port = SWITCHTEC_IOCTL_PFF_VEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (reg != p.pff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) p.port = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (p.port != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (copy_to_user(up, &p, sizeof(p)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
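/*
 * Map a (partition, port) pair to its PFF instance id: port 0 selects the
 * USP, SWITCHTEC_IOCTL_PFF_VEP selects the VEP and any other value indexes
 * the DSP instance id table.
 */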
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static int ioctl_port_to_pff(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct switchtec_ioctl_pff_port __user *up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct switchtec_ioctl_pff_port p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct part_cfg_regs __iomem *pcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (copy_from_user(&p, up, sizeof(p)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) pcfg = stdev->mmio_part_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) else if (p.partition < stdev->partition_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) pcfg = &stdev->mmio_part_cfg_all[p.partition];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) switch (p.port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) p.pff = ioread32(&pcfg->usp_pff_inst_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) case SWITCHTEC_IOCTL_PFF_VEP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) p.pff = ioread32(&pcfg->vep_pff_inst_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) p.port = array_index_nospec(p.port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (copy_to_user(up, &p, sizeof(p)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
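/*
 * Dispatch the management ioctls.  The MRPC mutex is held across each
 * handler and an error is returned up front once the device has been
 * killed.
 *
 * Illustrative userspace sketch (an assumption, not part of the driver):
 * reading the event summary through the character device, given a
 * /dev/switchtec0 node and the <linux/switchtec_ioctl.h> UAPI header:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/switchtec_ioctl.h>
 *
 *	int main(void)
 *	{
 *		struct switchtec_ioctl_event_summary sum;
 *		int fd = open("/dev/switchtec0", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, SWITCHTEC_IOCTL_EVENT_SUMMARY, &sum))
 *			return 1;
 *		printf("global events: %#llx\n",
 *		       (unsigned long long)sum.global);
 *		return 0;
 *	}
 */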
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct switchtec_user *stuser = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct switchtec_dev *stdev = stuser->stdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) rc = lock_mutex_and_test_alive(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) case SWITCHTEC_IOCTL_FLASH_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) rc = ioctl_flash_info(stdev, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) case SWITCHTEC_IOCTL_FLASH_PART_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) rc = ioctl_flash_part_info(stdev, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) rc = ioctl_event_summary(stdev, stuser, argp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) sizeof(struct switchtec_ioctl_event_summary_legacy));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) case SWITCHTEC_IOCTL_EVENT_CTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) rc = ioctl_event_ctl(stdev, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) case SWITCHTEC_IOCTL_PFF_TO_PORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) rc = ioctl_pff_to_port(stdev, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) case SWITCHTEC_IOCTL_PORT_TO_PFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) rc = ioctl_port_to_pff(stdev, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) case SWITCHTEC_IOCTL_EVENT_SUMMARY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) rc = ioctl_event_summary(stdev, stuser, argp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) sizeof(struct switchtec_ioctl_event_summary));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) rc = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) mutex_unlock(&stdev->mrpc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static const struct file_operations switchtec_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) .open = switchtec_dev_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) .release = switchtec_dev_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) .write = switchtec_dev_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) .read = switchtec_dev_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) .poll = switchtec_dev_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) .unlocked_ioctl = switchtec_dev_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) .compat_ioctl = compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) static void link_event_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct switchtec_dev *stdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) stdev = container_of(work, struct switchtec_dev, link_event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (stdev->link_notifier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) stdev->link_notifier(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
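/*
 * Compare the event count field of each PFF's link_state header with the
 * last value seen; if any changed, schedule link_event_work to call the
 * registered link notifier (if one is set).
 */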
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static void check_link_state_events(struct switchtec_dev *stdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) int occurred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) for (idx = 0; idx < stdev->pff_csr_count; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) count = (reg >> 5) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (count != stdev->link_event_count[idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) occurred = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) stdev->link_event_count[idx] = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (occurred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) schedule_work(&stdev->link_event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static void enable_link_state_events(struct switchtec_dev *stdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) for (idx = 0; idx < stdev->pff_csr_count; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) iowrite32(SWITCHTEC_EVENT_CLEAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) SWITCHTEC_EVENT_EN_IRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) &stdev->mmio_pff_csr[idx].link_state_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static void enable_dma_mrpc(struct switchtec_dev *stdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) flush_wc_buf(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
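/*
 * Final device release callback: disable DMA MRPC and free its coherent
 * buffer (if it was set up) before freeing the switchtec_dev itself.
 */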
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static void stdev_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) struct switchtec_dev *stdev = to_stdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (stdev->dma_mrpc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) iowrite32(0, &stdev->mmio_mrpc->dma_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) flush_wc_buf(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) writeq(0, &stdev->mmio_mrpc->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) kfree(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
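/*
 * Mark the device dead: stop bus mastering, cancel the MRPC timeout work,
 * complete and drop every queued MRPC user and wake anyone sleeping on the
 * event wait queue.
 */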
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static void stdev_kill(struct switchtec_dev *stdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct switchtec_user *stuser, *tmpuser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) pci_clear_master(stdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) cancel_delayed_work_sync(&stdev->mrpc_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /* Mark the hardware as unavailable and complete all completions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) mutex_lock(&stdev->mrpc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) stdev->alive = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /* Wake up and kill any users waiting on an MRPC request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) stuser->cmd_done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) wake_up_interruptible(&stuser->cmd_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) list_del_init(&stuser->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) stuser_put(stuser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) mutex_unlock(&stdev->mrpc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /* Wake up any users waiting on event_wq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) wake_up_interruptible(&stdev->event_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
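/*
 * Allocate and initialize a switchtec_dev, reserve a minor number and set
 * up (but do not yet add) its character device; returns an ERR_PTR on
 * failure.
 */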
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct switchtec_dev *stdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) int minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct cdev *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) dev_to_node(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (!stdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) stdev->alive = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) stdev->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) INIT_LIST_HEAD(&stdev->mrpc_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) mutex_init(&stdev->mrpc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) stdev->mrpc_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) INIT_WORK(&stdev->link_event_work, link_event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) init_waitqueue_head(&stdev->event_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) atomic_set(&stdev->event_cnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) dev = &stdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) device_initialize(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) dev->class = switchtec_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) dev->parent = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) dev->groups = switchtec_device_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) dev->release = stdev_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (minor < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) rc = minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) dev_set_name(dev, "switchtec%d", minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) cdev = &stdev->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) cdev_init(cdev, &switchtec_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) cdev->owner = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return stdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) err_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) put_device(&stdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
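/*
 * If the given event instance has occurred with its interrupt enabled,
 * mask the interrupt by clearing SWITCHTEC_EVENT_EN_IRQ in its header and
 * return 1; otherwise return 0.
 */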
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) size_t off = event_regs[eid].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) u32 __iomem *hdr_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) u32 hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) hdr = ioread32(hdr_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) iowrite32(hdr, hdr_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
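/*
 * Mask an event across every applicable instance: all partitions for
 * partition events, all local PFFs for PFF events, or the single global
 * instance.  Returns how many instances were actually masked.
 */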
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static int mask_all_events(struct switchtec_dev *stdev, int eid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (event_regs[eid].map_reg == part_ev_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) for (idx = 0; idx < stdev->partition_count; idx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) count += mask_event(stdev, eid, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) } else if (event_regs[eid].map_reg == pff_ev_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) for (idx = 0; idx < stdev->pff_csr_count; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (!stdev->pff_local[idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) count += mask_event(stdev, eid, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) count += mask_event(stdev, eid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
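/*
 * Main event interrupt handler: acknowledge MRPC completion (kicking the
 * MRPC work item), check for link state changes and mask any other events
 * that fired, waking pollers when at least one event was seen.
 */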
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static irqreturn_t switchtec_event_isr(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct switchtec_dev *stdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) irqreturn_t ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) int eid, event_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (reg & SWITCHTEC_EVENT_OCCURRED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) schedule_work(&stdev->mrpc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) check_link_state_events(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) event_count += mask_all_events(stdev, eid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (event_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) atomic_inc(&stdev->event_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) wake_up_interruptible(&stdev->event_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) event_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
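/*
 * DMA MRPC completion interrupt: re-arm the MRPC completion event and kick
 * the MRPC work item.
 */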
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) struct switchtec_dev *stdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) irqreturn_t ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) iowrite32(SWITCHTEC_EVENT_CLEAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) SWITCHTEC_EVENT_EN_IRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) &stdev->mmio_part_cfg->mrpc_comp_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) schedule_work(&stdev->mrpc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
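/*
 * Allocate MSI/MSI-X vectors and request the event interrupt; the vector
 * numbers to use are read from the hardware (vep_vector_number and, when
 * DMA MRPC is in use, dma_vector).
 */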
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static int switchtec_init_isr(struct switchtec_dev *stdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) int nvecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) int event_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) int dma_mrpc_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (nirqs < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) nirqs = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) PCI_IRQ_MSIX | PCI_IRQ_MSI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) PCI_IRQ_VIRTUAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (nvecs < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return nvecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (event_irq < 0 || event_irq >= nvecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) event_irq = pci_irq_vector(stdev->pdev, event_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (event_irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return event_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) rc = devm_request_irq(&stdev->pdev->dev, event_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) switchtec_event_isr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) KBUILD_MODNAME, stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (!stdev->dma_mrpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (dma_mrpc_irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return dma_mrpc_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) switchtec_dma_mrpc_isr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) KBUILD_MODNAME, stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
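/*
 * Count the PFF CSR blocks by probing their vendor IDs, and record which
 * PFF instances belong to the local partition so only their events are
 * masked in the ISR.
 */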
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static void init_pff(struct switchtec_dev *stdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (reg != PCI_VENDOR_ID_MICROSEMI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) stdev->pff_csr_count = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) reg = ioread32(&pcfg->usp_pff_inst_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (reg < stdev->pff_csr_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) stdev->pff_local[reg] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) reg = ioread32(&pcfg->vep_pff_inst_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (reg < stdev->pff_csr_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) stdev->pff_local[reg] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (reg < stdev->pff_csr_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) stdev->pff_local[reg] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
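/*
 * Enable the PCI device and map BAR 0: the MRPC region is mapped
 * write-combined, the rest of the GAS is mapped normally, and the mmio_*
 * pointers are set up from the fixed GAS offsets.  Optionally allocate the
 * DMA MRPC output buffer.
 */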
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static int switchtec_init_pci(struct switchtec_dev *stdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) void __iomem *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) unsigned long res_start, res_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) u32 __iomem *part_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) rc = pcim_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) res_start = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) res_len = pci_resource_len(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (!devm_request_mem_region(&pdev->dev, res_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) res_len, KBUILD_MODNAME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) SWITCHTEC_GAS_TOP_CFG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (!stdev->mmio_mrpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) map = devm_ioremap(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (stdev->gen == SWITCHTEC_GEN3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) part_id = &stdev->mmio_sys_info->gen3.partition_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) else if (stdev->gen == SWITCHTEC_GEN4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) part_id = &stdev->mmio_sys_info->gen4.partition_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) stdev->partition = ioread8(part_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (stdev->partition_count < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) stdev->partition_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) init_pff(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) pci_set_drvdata(pdev, stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (!use_dma_mrpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) sizeof(*stdev->dma_mrpc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) &stdev->dma_mrpc_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (!stdev->dma_mrpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
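/*
 * Probe: create the device, map and initialize the hardware, set up
 * interrupts, enable MRPC completion and link state events and finally
 * register the character device.
 */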
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) static int switchtec_pci_probe(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct switchtec_dev *stdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) request_module_nowait("ntb_hw_switchtec");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) stdev = stdev_create(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (IS_ERR(stdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return PTR_ERR(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) stdev->gen = id->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) rc = switchtec_init_pci(stdev, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) rc = switchtec_init_isr(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) dev_err(&stdev->dev, "failed to init isr.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) iowrite32(SWITCHTEC_EVENT_CLEAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) SWITCHTEC_EVENT_EN_IRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) &stdev->mmio_part_cfg->mrpc_comp_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) enable_link_state_events(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (stdev->dma_mrpc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) enable_dma_mrpc(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) rc = cdev_device_add(&stdev->cdev, &stdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) goto err_devadd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) dev_info(&stdev->dev, "Management device registered.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) err_devadd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) stdev_kill(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) err_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) put_device(&stdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
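/*
 * Teardown mirrors probe: clear the drvdata pointer, remove the char
 * device, release the minor number, shut the stdev down with
 * stdev_kill() so outstanding users are failed out, and drop the final
 * reference.
 */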
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) static void switchtec_pci_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) struct switchtec_dev *stdev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) pci_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) cdev_device_del(&stdev->cdev, &stdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) dev_info(&stdev->dev, "unregistered.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) stdev_kill(stdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) put_device(&stdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
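/*
 * Every supported part appears in the ID table twice: once with the
 * "other memory controller" class code used by the plain management
 * endpoint, and once with the "other bridge" class code used when the
 * part is configured for NTB, which is what makes probe request
 * ntb_hw_switchtec above.
 */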
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) #define SWITCHTEC_PCI_DEVICE(device_id, gen) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) .vendor = PCI_VENDOR_ID_MICROSEMI, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) .device = device_id, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) .subvendor = PCI_ANY_ID, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) .subdevice = PCI_ANY_ID, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) .class = (PCI_CLASS_MEMORY_OTHER << 8), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) .class_mask = 0xFFFFFFFF, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) .driver_data = gen, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) .vendor = PCI_VENDOR_ID_MICROSEMI, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) .device = device_id, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) .subvendor = PCI_ANY_ID, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) .subdevice = PCI_ANY_ID, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) .class = (PCI_CLASS_BRIDGE_OTHER << 8), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) .class_mask = 0xFFFFFFFF, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) .driver_data = gen, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static const struct pci_device_id switchtec_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), //PFX 24xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), //PFX 32xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3), //PFX 48xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3), //PFX 64xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3), //PFX 80xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3), //PFX 96xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3), //PSX 24xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3), //PSX 32xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3), //PSX 48xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3), //PSX 64xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3), //PSX 80xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3), //PSX 96xG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3), //PAX 24XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3), //PAX 32XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3), //PAX 48XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3), //PAX 64XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3), //PAX 80XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3), //PAX 96XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3), //PFXL 24XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3), //PFXL 32XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3), //PFXL 48XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3), //PFXL 64XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3), //PFXL 80XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3), //PFXL 96XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3), //PFXI 24XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3), //PFXI 32XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3), //PFXI 48XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3), //PFXI 64XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3), //PFXI 80XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3), //PFXI 96XG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4), //PFX 100XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4), //PFX 84XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4), //PFX 68XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4), //PFX 52XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4), //PFX 36XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4), //PFX 28XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4), //PSX 100XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4), //PSX 84XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4), //PSX 68XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4), //PSX 52XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4), //PSX 36XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4), //PSX 28XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4), //PAX 100XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4), //PAX 84XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4), //PAX 68XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), //PAX 52XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), //PAX 36XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), //PAX 28XG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) {0}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
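/* Export the ID table so udev/modalias can autoload the module. */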
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) static struct pci_driver switchtec_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) .name = KBUILD_MODNAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) .id_table = switchtec_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) .probe = switchtec_pci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) .remove = switchtec_pci_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
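/*
 * Module init: reserve a char-dev region sized by the max_devices
 * parameter, create the "switchtec" device class, then register the
 * PCI driver; the error labels unwind those steps in reverse order.
 */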
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) static int __init switchtec_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) "switchtec");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) switchtec_class = class_create(THIS_MODULE, "switchtec");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (IS_ERR(switchtec_class)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) rc = PTR_ERR(switchtec_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) goto err_create_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) rc = pci_register_driver(&switchtec_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) goto err_pci_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) pr_info(KBUILD_MODNAME ": loaded.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) err_pci_register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) class_destroy(switchtec_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) err_create_class:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) unregister_chrdev_region(switchtec_devt, max_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) module_init(switchtec_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) static void __exit switchtec_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) pci_unregister_driver(&switchtec_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) class_destroy(switchtec_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) unregister_chrdev_region(switchtec_devt, max_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) ida_destroy(&switchtec_minor_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) pr_info(KBUILD_MODNAME ": unloaded.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) module_exit(switchtec_exit);
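/*
 * Illustrative userspace sketch (not part of the driver): the character
 * device registered above implements the MRPC interface described in
 * Documentation/driver-api/switchtec.rst. A caller writes a 4-byte
 * command number followed by up to 1024 bytes of input data, then reads
 * back a 4-byte return code followed by the output data; the node is
 * typically /dev/switchtec0. Roughly:
 *
 *	int fd = open("/dev/switchtec0", O_RDWR | O_CLOEXEC);
 *	uint32_t cmd = ...;			// command ID from the firmware spec
 *	uint8_t buf[4 + 1024];
 *	memcpy(buf, &cmd, sizeof(cmd));		// command goes in the first 4 bytes
 *	// any input payload follows the command
 *	write(fd, buf, sizeof(cmd));		// submits the MRPC command
 *	read(fd, buf, sizeof(buf));		// blocks until done; buf[0..3] = return code
 *	close(fd);
 */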