// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 *  - Kvaser linux pciefd driver (version 5.25)
 *  - PEAK linux canfd driver
 *  - Altera Avalon EPCS flash controller driver
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");

#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
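/* Next expiry for the error counter poll timer, relative to jiffies at
 * the time of use (the macro below re-reads jiffies on every expansion)
 */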
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
#define KVASER_PCIEFD_DMA_COUNT 2

#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)

#define KVASER_PCIEFD_VENDOR 0x1a07
#define KVASER_PCIEFD_4HS_ID 0x0d
#define KVASER_PCIEFD_2HS_ID 0x0e
#define KVASER_PCIEFD_HS_ID 0x0f
#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11

/* PCIe IRQ registers */
#define KVASER_PCIEFD_IRQ_REG 0x40
#define KVASER_PCIEFD_IEN_REG 0x50
/* DMA map */
#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN0_BASE 0x10000
#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* Loopback control register */
#define KVASER_PCIEFD_LOOP_REG 0x1f000
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_BASE 0x1f020
#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* EPCS flash controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00
#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)

#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
#define KVASER_PCIEFD_IRQ_SRB BIT(4)

#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)

/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)

/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* EPCS flash controller definitions */
#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
#define KVASER_PCIEFD_CFG_SYS_VER 1
#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
#define KVASER_PCIEFD_SPI_TMT BIT(5)
#define KVASER_PCIEFD_SPI_TRDY BIT(6)
#define KVASER_PCIEFD_SPI_RRDY BIT(7)
#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
/* Commands for controlling the onboard flash */
#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5

/* Kvaser KCAN definitions */
#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)

#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)

/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* FDF bit when controller is in classic mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)

#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16

#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Controller has one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller has CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
	KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
	KVASER_PCIEFD_KCAN_STAT_IRM)

/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)

#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26

#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16

/* Kvaser KCAN packet types */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0
#define KVASER_PCIEFD_PACK_TYPE_ACK 1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9

/* Kvaser KCAN packet common definitions */
#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
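/* Sketch of the layout implied by the definitions above: the sequence
 * number occupies the low byte of a header word, with the channel id at
 * bit 25 and the packet type at bit 28. This is inferred from the
 * masks/shifts, not from a datasheet.
 */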

/* Kvaser KCAN TDATA and RDATA first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
/* Kvaser KCAN TDATA and RDATA second word */
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
/* Kvaser KCAN TDATA second word */
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)

/* Kvaser KCAN APACKET */
#define KVASER_PCIEFD_APACKET_FLU BIT(8)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_NACK BIT(11)

/* Kvaser KCAN SPACK first word */
#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
/* Kvaser KCAN SPACK second word */
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)

/* Kvaser KCAN_EPACK second word */
#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)

struct kvaser_pciefd;

struct kvaser_pciefd_can {
	struct can_priv can;
	struct kvaser_pciefd *kv_pcie;
	void __iomem *reg_base;
	struct can_berr_counter bec;
	u8 cmd_seq;
	int err_rep_cnt;
	int echo_idx;
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;
	struct completion start_comp, flush_comp;
};

struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
	u8 nr_channels;
	u32 bus_freq;
	u32 freq;
	u32 freq_to_ticks_div;
};

struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};

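/* Outgoing packet: two TDATA header words followed by up to 64 bytes of
 * payload, the maximum CAN FD data length
 */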
struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};

static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};

struct kvaser_pciefd_cfg_param {
	__le32 magic;
	__le32 nr;
	__le32 len;
	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
};

struct kvaser_pciefd_cfg_img {
	__le32 version;
	__le32 magic;
	__le32 crc;
	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};

static const struct pci_device_id kvaser_pciefd_id_table[] = {
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);

/* Onboard flash memory functions */
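/* Poll the SPI status register until a bit in @msk is set, giving up
 * after 10 us (readl_poll_timeout() with no sleep between reads)
 */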
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
	u32 res;
	int ret;

	ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
				 res, res & msk, 0, 10);

	return ret;
}

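/* Full-duplex SPI transfer: shift out @tx_len command bytes, then clock
 * in @rx_len reply bytes by writing dummy zeroes. The SSEL/CTRL writes
 * presumably assert the chip select and the SSO bit of the Altera Avalon
 * SPI core this controller appears to be derived from (see the file
 * header); treat that bit mapping as an assumption.
 */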
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
				 u32 tx_len, u8 *rx, u32 rx_len)
{
	int c;

	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);

	c = tx_len;
	while (c--) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	c = rx_len;
	while (c-- > 0) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
		return -EIO;

	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);

	if (c != -1) {
		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
		return -EIO;
	}

	return 0;
}

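/* Read the 64 KiB config image from flash and validate it: version and
 * magic must match the expected values, and the stored CRC must equal the
 * inverted big-endian CRC-32 computed over the parameter area.
 */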
static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_cfg_img *img)
{
	int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
	int res, crc;
	u8 *crc_buff;

	u8 cmd[] = {
		KVASER_PCIEFD_FLASH_READ_CMD,
		(u8)((offset >> 16) & 0xff),
		(u8)((offset >> 8) & 0xff),
		(u8)(offset & 0xff)
	};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
				    KVASER_PCIEFD_CFG_IMG_SZ);
	if (res)
		return res;

	crc_buff = (u8 *)img->params;

	if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, version number is wrong\n");
		return -ENODEV;
	}

	if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, magic number is wrong\n");
		return -ENODEV;
	}

	crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
	if (le32_to_cpu(img->crc) != crc) {
		dev_err(&pcie->pci->dev,
			"Stored CRC does not match flash image contents\n");
		return -EIO;
	}

	return 0;
}

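/* Extract the parameters the driver needs from the config image; only the
 * channel count (parameter 130) is read here.
 */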
static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
					  struct kvaser_pciefd_cfg_img *img)
{
	struct kvaser_pciefd_cfg_param *param;

	param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
	memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
}

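/* Probe the flash and read the configuration: check the electronic
 * signature against the expected EPCS16 id, make sure no write is in
 * progress, then read and verify the config image and pull out the
 * parameters.
 */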
static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
{
	int res;
	struct kvaser_pciefd_cfg_img *img;

	/* Read electronic signature */
	u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
	if (res)
		return -EIO;

	img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
	if (!img)
		return -ENOMEM;

	if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
		dev_err(&pcie->pci->dev,
			"Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
			cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);

		res = -ENODEV;
		goto image_free;
	}

	cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
	res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
	if (res) {
		goto image_free;
	} else if (cmd[0] & 1) {
		res = -EIO;
		/* No write is ever done, so the WIP bit should never be set */
		dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
		goto image_free;
	}

	res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
	if (res) {
		res = -EIO;
		goto image_free;
	}

	kvaser_pciefd_cfg_read_params(pcie, img);

image_free:
	kfree(img);
	return res;
}

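/* Ask the controller for a status packet. Every command carries an
 * incremented sequence number above KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT,
 * presumably so replies can be matched to the command that issued them.
 */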
static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	u32 cmd;

	cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
	cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
	iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	return 0;
}

static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;

	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}

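/* Kick off an abort/flush of the transmit path. An idle controller takes
 * the abort/flush/reset command directly; otherwise it is put in reset
 * mode first. Completion is expected to be signalled through the ABD/TFD
 * interrupts, which complete flush_comp from the interrupt handler.
 */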
static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* If controller is already idle, run abort, flush and reset */
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}

	spin_unlock_irqrestore(&can->lock, irq);
}

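/* Bring the controller on bus: wait for any pending flush to finish,
 * reset the interrupt state, take the controller out of reset mode and
 * wait for the start completion before re-enabling the queue.
 */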
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);

	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);

	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}

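/* Stop the PWM output: writing the current top value into the trigger
 * field forces the duty cycle to zero.
 */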
static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;

	/* Set duty cycle to zero */
	pwm_ctrl |= top;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

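/* Start the PWM: top is derived from the bus clock for a 500 kHz output
 * (top = bus_freq / (2 * 500 kHz) - 1), and the trigger is set to roughly
 * 5% of top, which presumably maps to the intended 95% duty cycle.
 */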
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);

	/* Set frequency to 500 kHz */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	pwm_ctrl = top & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95% */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = trigger & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_open(struct net_device *netdev)
{
	int err;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_pciefd_bus_on(can);
	if (err) {
		close_candev(netdev);
		return err;
	}

	return 0;
}

static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	close_candev(netdev);

	return ret;
}

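/* Fill in the two TDATA header words and copy the payload from @skb into
 * @p. Returns the payload size in 32-bit words, rounded up.
 */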
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) struct kvaser_pciefd_can *can,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct canfd_frame *cf = (struct canfd_frame *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) int packet_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) int seq = can->echo_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) memset(p, 0, sizeof(*p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if (cf->can_id & CAN_RTR_FLAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (cf->can_id & CAN_EFF_FLAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) p->header[0] |= cf->can_id & CAN_EFF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (can_is_canfd_skb(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (cf->flags & CANFD_BRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (cf->flags & CANFD_ESI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) packet_size = cf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) memcpy(p->data, cf->data, packet_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return DIV_ROUND_UP(packet_size, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
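/* TX path sketch: a packet is streamed into the per-channel FIFO as
 * two header words followed by the payload words. Writing the final
 * word to KVASER_PCIEFD_KCAN_FIFO_LAST_REG, rather than the regular
 * FIFO register, marks the end of the packet; a packet without payload
 * is completed by writing a single zero word there. The echo index
 * doubles as the hardware sequence number, so a later TX acknowledge
 * can be matched back to its echo skb.
 */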
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct kvaser_pciefd_can *can = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct kvaser_pciefd_tx_packet packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) int nwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) u8 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (can_dropped_invalid_skb(netdev, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) spin_lock_irqsave(&can->echo_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /* Prepare and save echo skb in internal slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) can_put_echo_skb(skb, netdev, can->echo_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* Move echo index to the next slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Write header to fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) iowrite32(packet.header[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) iowrite32(packet.header[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (nwords) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) u32 data_last = ((u32 *)packet.data)[nwords - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* Write data to fifo, except last word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) iowrite32_rep(can->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) nwords - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /* Write last word to end of fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) __raw_writel(data_last, can->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /* Complete write to fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) __raw_writel(0, can->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
	/* If there is no room for a new message, stop the queue until at
	 * least one transmission has completed successfully
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) can->can.echo_skb[can->echo_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) spin_unlock_irqrestore(&can->echo_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
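/* Sketch of the BTRN/BTRD bit timing register layout, inferred from
 * the masks and shifts used below (field widths follow the masks, not
 * a datasheet):
 *
 *   (phase_seg2 - 1)            & 0x1f   at TSEG2_SHIFT
 *   (prop_seg + phase_seg1 - 1) & 0x1ff  at TSEG1_SHIFT
 *   (sjw - 1)                   & 0xf    at SJW_SHIFT
 *   (brp - 1)                   & 0x1fff at bit 0
 */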
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) u32 mode, test, btrn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct can_bittiming *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) bt = &can->can.data_bittiming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) bt = &can->can.bittiming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) ((bt->brp - 1) & 0x1fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) spin_lock_irqsave(&can->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* Put the circuit in reset mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
	/* Bittiming can only be changed while the controller is in reset mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) test, test & KVASER_PCIEFD_KCAN_MODE_RM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 0, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) spin_unlock_irqrestore(&can->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Restore previous reset mode status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) spin_unlock_irqrestore(&can->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
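
/* Both wrappers are called from the CAN netlink layer. A typical
 * userspace configuration exercising the nominal and data phase
 * timings (the interface name is only an example):
 *
 *   ip link set can0 type can bitrate 500000 dbitrate 2000000 fd on
 *   ip link set can0 up
 */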
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct kvaser_pciefd_can *can = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) case CAN_MODE_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (!can->can.restart_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ret = kvaser_pciefd_bus_on(can);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct can_berr_counter *bec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct kvaser_pciefd_can *can = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) bec->rxerr = can->bec.rxerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) bec->txerr = can->bec.txerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
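/* Fallback while bus error reporting is rate limited: re-enable
 * hardware error generation, request a fresh status packet (which
 * refreshes the cached error counters and state), and reset the
 * error report budget (err_rep_cnt).
 */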
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) kvaser_pciefd_enable_err_gen(can);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) kvaser_pciefd_request_status(can);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) can->err_rep_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static const struct net_device_ops kvaser_pciefd_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) .ndo_open = kvaser_pciefd_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) .ndo_stop = kvaser_pciefd_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .ndo_start_xmit = kvaser_pciefd_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) .ndo_change_mtu = can_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
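/* Per-channel initialisation: allocate a candev, point it at the
 * channel's KCAN register window, verify the hardware TX FIFO depth
 * against KVASER_PCIEFD_CAN_TX_MAX_COUNT and the CAN FD support bit,
 * wire up the CAN framework callbacks, and enable the ABD and TFD
 * interrupts for the channel.
 */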
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) for (i = 0; i < pcie->nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct kvaser_pciefd_can *can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) u32 status, tx_npackets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) KVASER_PCIEFD_CAN_TX_MAX_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (!netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) can = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) can->kv_pcie = pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) can->cmd_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) can->err_rep_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) can->bec.txerr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) can->bec.rxerr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) init_completion(&can->start_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) init_completion(&can->flush_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
		/* Disable bus load reporting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) tx_npackets = ioread32(can->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) 0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) dev_err(&pcie->pci->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) "Max Tx count is smaller than expected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) free_candev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) can->can.clock.freq = pcie->freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) can->echo_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) spin_lock_init(&can->echo_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) spin_lock_init(&can->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) can->can.do_set_data_bittiming =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) kvaser_pciefd_set_data_bittiming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) can->can.do_set_mode = kvaser_pciefd_set_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) CAN_CTRLMODE_FD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) CAN_CTRLMODE_FD_NON_ISO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) dev_err(&pcie->pci->dev,
				"CAN FD expected but not supported, channel %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) free_candev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) netdev->flags |= IFF_ECHO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) SET_NETDEV_DEV(netdev, &pcie->pci->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) KVASER_PCIEFD_KCAN_IRQ_TFD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) pcie->can[i] = can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) kvaser_pciefd_pwm_start(can);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) for (i = 0; i < pcie->nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) int err = register_candev(pcie->can[i]->can.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /* Unregister all successfully registered devices. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) unregister_candev(pcie->can[j]->can.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
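/* Each RX DMA buffer is described to the device by two consecutive
 * 32-bit words at KVASER_PCIEFD_DMA_MAP_BASE: the low address word,
 * with bit 0 (KVASER_PCIEFD_64BIT_DMA_BIT) flagging a 64-bit address,
 * and the high address word. Worked example, assuming a 64-bit
 * dma_addr_t: a buffer at 0x123456000 is written as word1 = 0x23456001
 * and word2 = 0x1. Coherent DMA buffers are page aligned, so bit 0 of
 * the address is always free for the flag.
 */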
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dma_addr_t addr, int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) u32 word1, word2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) word2 = addr >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) word1 = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) word2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) iowrite32(word1, pcie->reg_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) iowrite32(word2, pcie->reg_base + offset + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
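/* RX DMA setup: KVASER_PCIEFD_DMA_COUNT (two) ping-pong buffers of
 * KVASER_PCIEFD_DMA_SIZE bytes each. The sequence below is: disable
 * DMA, map both buffers, reset the Rx FIFO and both buffers, verify
 * that the engine reports idle, then enable DMA.
 */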
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) u32 srb_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Disable the DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) pcie->dma_data[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) dmam_alloc_coherent(&pcie->pci->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) KVASER_PCIEFD_DMA_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) &dma_addr[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (!pcie->dma_data[i] || !dma_addr[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) KVASER_PCIEFD_DMA_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
	/* Reset Rx FIFO and both DMA buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) KVASER_PCIEFD_SRB_CMD_RDB1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /* Enable the DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
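/* Board bring-up: read the FPGA system ID block to validate the
 * channel count and log the firmware version, require DMA support,
 * and cache the bus and CAN clock frequencies. freq_to_ticks_div
 * holds CAN clock ticks per microsecond; the RX paths compute
 * timestamp * 1000 / freq_to_ticks_div to obtain nanoseconds.
 */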
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) u32 sysid, srb_status, build;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) u8 sysid_nr_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) ret = kvaser_pciefd_read_cfg(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (pcie->nr_channels != sysid_nr_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) dev_err(&pcie->pci->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) "Number of channels does not match: %u vs %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) pcie->nr_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) sysid_nr_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) (sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) sysid & 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) (build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) dev_err(&pcie->pci->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) "Hardware without DMA is not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) pcie->bus_freq = ioread32(pcie->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) KVASER_PCIEFD_SYSID_BUSFREQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) pcie->freq_to_ticks_div = pcie->freq / 1000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (pcie->freq_to_ticks_div == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) pcie->freq_to_ticks_div = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* Turn off all loopback functionality */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
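/* RX data packets arrive in the DMA buffers with the same two-word
 * header layout as TX packets (see kvaser_pciefd_prepare_tx_packet()),
 * followed by the payload. The channel id is taken from header[1] at
 * KVASER_PCIEFD_PACKET_CHID_SHIFT and the hardware timestamp is
 * converted to a nanosecond hwtstamp on the skb.
 */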
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct kvaser_pciefd_rx_packet *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) __le32 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct canfd_frame *cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct can_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct net_device_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct skb_shared_hwtstamps *shhwtstamps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (ch_id >= pcie->nr_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) priv = &pcie->can[ch_id]->can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) stats = &priv->dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) skb = alloc_canfd_skb(priv->dev, &cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) cf->flags |= CANFD_BRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) cf->flags |= CANFD_ESI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) cf->can_id = p->header[0] & CAN_EFF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) cf->can_id |= CAN_EFF_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) cf->can_id |= CAN_RTR_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) memcpy(cf->data, data, cf->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) shhwtstamps = skb_hwtstamps(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) shhwtstamps->hwtstamp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) ns_to_ktime(div_u64(p->timestamp * 1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) pcie->freq_to_ticks_div));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) stats->rx_bytes += cf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct can_frame *cf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) enum can_state new_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) enum can_state tx_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) enum can_state rx_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) can_change_state(can->can.dev, cf, tx_state, rx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (new_state == CAN_STATE_BUS_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) struct net_device *ndev = can->can.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) spin_lock_irqsave(&can->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) netif_stop_queue(can->can.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) spin_unlock_irqrestore(&can->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
		/* Prevent the CAN controller from auto-recovering from bus off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (!can->can.restart_ms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) kvaser_pciefd_start_controller_flush(can);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) can_bus_off(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
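/* Derive the CAN state from a status packet. The error counter
 * thresholds mirror the classic CAN fault confinement rules:
 *
 *   txerr/rxerr >= 255, or BOFF/IRM set  -> bus off
 *   txerr/rxerr >= 128, or EPLR set      -> error passive
 *   txerr/rxerr >=  96, or EWLR set      -> error warning
 *   otherwise                            -> error active
 *
 * tx_state and rx_state tell can_change_state() which side is at
 * fault; 0 (CAN_STATE_ERROR_ACTIVE) marks the side that is not.
 */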
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct can_berr_counter *bec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) enum can_state *new_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) enum can_state *tx_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) enum can_state *rx_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) p->header[0] & KVASER_PCIEFD_SPACK_IRM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) *new_state = CAN_STATE_BUS_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) else if (bec->txerr >= 255 || bec->rxerr >= 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) *new_state = CAN_STATE_BUS_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) *new_state = CAN_STATE_ERROR_PASSIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) else if (bec->txerr >= 128 || bec->rxerr >= 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) *new_state = CAN_STATE_ERROR_PASSIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *new_state = CAN_STATE_ERROR_WARNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) else if (bec->txerr >= 96 || bec->rxerr >= 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) *new_state = CAN_STATE_ERROR_WARNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) *new_state = CAN_STATE_ERROR_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct kvaser_pciefd_rx_packet *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct can_berr_counter bec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) enum can_state old_state, new_state, tx_state, rx_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct net_device *ndev = can->can.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct can_frame *cf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct skb_shared_hwtstamps *shhwtstamps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct net_device_stats *stats = &ndev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) old_state = can->can.state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) bec.txerr = p->header[0] & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) &rx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) skb = alloc_can_err_skb(ndev, &cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (new_state != old_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) kvaser_pciefd_change_state(can, cf, new_state, tx_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) rx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (old_state == CAN_STATE_BUS_OFF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) new_state == CAN_STATE_ERROR_ACTIVE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) can->can.restart_ms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) can->can.can_stats.restarts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) cf->can_id |= CAN_ERR_RESTARTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) can->err_rep_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) can->can.can_stats.bus_error++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) stats->tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) stats->rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) can->bec.txerr = bec.txerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) can->bec.rxerr = bec.rxerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) shhwtstamps = skb_hwtstamps(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) shhwtstamps->hwtstamp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ns_to_ktime(div_u64(p->timestamp * 1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) can->kv_pcie->freq_to_ticks_div));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) cf->can_id |= CAN_ERR_BUSERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) cf->data[6] = bec.txerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) cf->data[7] = bec.rxerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) stats->rx_bytes += cf->can_dlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
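/* Bus error reporting is rate limited: once err_rep_cnt reaches
 * KVASER_PCIEFD_MAX_ERR_REP, hardware error generation is disabled
 * and the bec_poll_timer takes over, re-armed roughly every 200 ms
 * (KVASER_PCIEFD_BEC_POLL_FREQ) while errors persist.
 */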
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct kvaser_pciefd_rx_packet *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) struct kvaser_pciefd_can *can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (ch_id >= pcie->nr_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) can = pcie->can[ch_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) kvaser_pciefd_rx_error_frame(can, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors until bec_poll_timer expires */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) kvaser_pciefd_disable_err_gen(can);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* Start polling the error counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct kvaser_pciefd_rx_packet *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) struct can_berr_counter bec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) enum can_state old_state, new_state, tx_state, rx_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) old_state = can->can.state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) bec.txerr = p->header[0] & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) &rx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (new_state != old_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct net_device *ndev = can->can.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) struct can_frame *cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct skb_shared_hwtstamps *shhwtstamps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) skb = alloc_can_err_skb(ndev, &cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct net_device_stats *stats = &ndev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) kvaser_pciefd_change_state(can, cf, new_state, tx_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) rx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (old_state == CAN_STATE_BUS_OFF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) new_state == CAN_STATE_ERROR_ACTIVE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) can->can.restart_ms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) can->can.can_stats.restarts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) cf->can_id |= CAN_ERR_RESTARTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) shhwtstamps = skb_hwtstamps(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) shhwtstamps->hwtstamp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) ns_to_ktime(div_u64(p->timestamp * 1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) can->kv_pcie->freq_to_ticks_div));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) cf->data[6] = bec.txerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) cf->data[7] = bec.rxerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) can->bec.txerr = bec.txerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) can->bec.rxerr = bec.rxerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* Check if we need to poll the error counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (bec.txerr || bec.rxerr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct kvaser_pciefd_rx_packet *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct kvaser_pciefd_can *can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) u8 cmdseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (ch_id >= pcie->nr_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) can = pcie->can[ch_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* Reset done, start abort and flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) cmd = KVASER_PCIEFD_KCAN_CMD_AT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packets are in the FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) u8 count = ioread32(can->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /* Response to status request received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) kvaser_pciefd_handle_status_resp(can, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (can->can.state != CAN_STATE_BUS_OFF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) can->can.state != CAN_STATE_ERROR_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) mod_timer(&can->bec_poll_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) KVASER_PCIEFD_BEC_POLL_FREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Transition from reset to bus on detected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (!completion_done(&can->start_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) complete(&can->start_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct kvaser_pciefd_rx_packet *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct kvaser_pciefd_can *can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (ch_id >= pcie->nr_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) can = pcie->can[ch_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* If this is the last flushed packet, send end of flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) u8 count = ioread32(can->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) int dlc = can_get_echo_skb(can->can.dev, echo_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) struct net_device_stats *stats = &can->can.dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) stats->tx_bytes += dlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) stats->tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (netif_queue_stopped(can->can.dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) netif_wake_queue(can->can.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct kvaser_pciefd_rx_packet *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct net_device_stats *stats = &can->can.dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) struct can_frame *cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) skb = alloc_can_err_skb(can->can.dev, &cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) stats->tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) cf->can_id |= CAN_ERR_LOSTARB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) can->can.can_stats.arbitration_lost++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) } else if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) cf->can_id |= CAN_ERR_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) cf->can_id |= CAN_ERR_BUSERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) stats->rx_bytes += cf->can_dlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) netdev_warn(can->can.dev, "No memory left for err_skb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct kvaser_pciefd_rx_packet *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct kvaser_pciefd_can *can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) bool one_shot_fail = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (ch_id >= pcie->nr_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) can = pcie->can[ch_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /* Ignore control packet ACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) kvaser_pciefd_handle_nack_packet(can, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) one_shot_fail = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) netdev_dbg(can->can.dev, "Packet was flushed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) int dlc = can_get_echo_skb(can->can.dev, echo_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) u8 count = ioread32(can->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) netif_queue_stopped(can->can.dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) netif_wake_queue(can->can.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (!one_shot_fail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) struct net_device_stats *stats = &can->can.dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) stats->tx_bytes += dlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) stats->tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
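/* Handle an end-of-flush packet: wake up anyone waiting for the
 * Tx FIFO flush to complete.
 */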
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) struct kvaser_pciefd_rx_packet *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) struct kvaser_pciefd_can *can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (ch_id >= pcie->nr_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) can = pcie->can[ch_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (!completion_done(&can->flush_comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) complete(&can->flush_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
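/* Parse a single packet from the DMA buffer at *start_pos. Each packet
 * starts with a 32-bit size word (total packet length in 32-bit words,
 * including the size word itself), followed by two header words, a
 * 64-bit timestamp and an optional payload. On success, *start_pos is
 * advanced to the next packet header.
 */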
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) int dma_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) __le32 *buffer = pcie->dma_data[dma_buf];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) __le64 timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) struct kvaser_pciefd_rx_packet packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) struct kvaser_pciefd_rx_packet *p = &packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) int pos = *start_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) size = le32_to_cpu(buffer[pos++]);
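/* A size of zero marks the end of valid data; rewind to the start of
 * the buffer
 */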
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (!size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) *start_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) p->header[0] = le32_to_cpu(buffer[pos++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) p->header[1] = le32_to_cpu(buffer[pos++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) /* Read 64-bit timestamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) memcpy(&timestamp, &buffer[pos], sizeof(__le64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) pos += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) p->timestamp = le64_to_cpu(timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) case KVASER_PCIEFD_PACK_TYPE_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) u8 data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) data_len = can_dlc2len(p->header[1] >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) KVASER_PCIEFD_RPACKET_DLC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) pos += DIV_ROUND_UP(data_len, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) case KVASER_PCIEFD_PACK_TYPE_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) ret = kvaser_pciefd_handle_ack_packet(pcie, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) case KVASER_PCIEFD_PACK_TYPE_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) ret = kvaser_pciefd_handle_status_packet(pcie, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) case KVASER_PCIEFD_PACK_TYPE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) ret = kvaser_pciefd_handle_error_packet(pcie, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) ret = kvaser_pciefd_handle_eack_packet(pcie, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) case KVASER_PCIEFD_PACK_TYPE_TXRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) dev_info(&pcie->pci->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) "Received unexpected packet type 0x%08X\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) /* If the position does not point to the end of the packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)  * the packet size field must have been corrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if ((*start_pos + size) != pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) /* Point to the next packet header, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) *start_pos = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
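/* Drain one DMA buffer: parse packets until an error, a zero size word
 * or the end of the buffer is reached.
 */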
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) int pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
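/* Handle shared receive buffer (SRB) interrupts: drain and re-arm the
 * DMA buffers on packet-done, report over- and underflow, then
 * acknowledge the handled bits.
 */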
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) u32 irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) kvaser_pciefd_read_buffer(pcie, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) /* Reset DMA buffer 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) kvaser_pciefd_read_buffer(pcie, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) /* Reset DMA buffer 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
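/* Handle per-channel KCAN interrupts: log Tx/Rx FIFO errors and issue
 * the end-of-flush command once a requested Tx FIFO flush has drained
 * the FIFO.
 */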
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) netdev_err(can->can.dev, "Tx FIFO overflow\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
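/* Tx FIFO flush done: with the FIFO empty, issue the EFLUSH command so
 * the device emits an end-of-flush packet
 */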
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) u8 count = ioread32(can->reg_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) netdev_err(can->can.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) "Failed to change bittiming while not in reset mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) netdev_err(can->can.dev, "Rx FIFO overflow\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
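/* Top-level PCI interrupt handler: dispatch receive (SRB) work and
 * per-channel transmit (KCAN) work, then acknowledge the board IRQs.
 */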
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) u32 board_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (board_irq & KVASER_PCIEFD_IRQ_SRB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) kvaser_pciefd_receive_irq(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) for (i = 0; i < pcie->nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (!pcie->can[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) dev_err(&pcie->pci->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) "IRQ mask points to unallocated controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) /* Check whether the board IRQ mask has the bit for channel i set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (board_irq & (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) kvaser_pciefd_transmit_irq(pcie->can[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
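/* Probe error-path teardown: disable channel interrupts, stop the PWM
 * output and free each allocated candev before it has been registered.
 */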
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) struct kvaser_pciefd_can *can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) for (i = 0; i < pcie->nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) can = pcie->can[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (can) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) iowrite32(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) kvaser_pciefd_pwm_stop(can);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) free_candev(can->can.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
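/* Probe: enable and map the PCI device, initialize the board and DMA,
 * set up the CAN controllers, enable interrupts and register the
 * network devices.
 */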
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static int kvaser_pciefd_probe(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) struct kvaser_pciefd *pcie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (!pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) pci_set_drvdata(pdev, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) pcie->pci = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) goto err_disable_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) pcie->reg_base = pci_iomap(pdev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (!pcie->reg_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) goto err_release_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) err = kvaser_pciefd_setup_board(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) goto err_pci_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) err = kvaser_pciefd_setup_dma(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) goto err_pci_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) err = kvaser_pciefd_setup_can_ctrls(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) goto err_teardown_can_ctrls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
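/* Acknowledge any pending DMA packet-done interrupts, then enable the
 * packet-done, overflow and underflow interrupts
 */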
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) /* Reset board IRQ handling; IRQs are expected to be disabled before this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) pcie->reg_base + KVASER_PCIEFD_IEN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /* Ready the DMA buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) goto err_teardown_can_ctrls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) err = kvaser_pciefd_reg_candev(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) goto err_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) err_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) free_irq(pcie->pci->irq, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) err_teardown_can_ctrls:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) kvaser_pciefd_teardown_can_ctrls(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) pci_clear_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) err_pci_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) pci_iounmap(pdev, pcie->reg_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) err_release_regions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) err_disable_pci:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
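/* Unregister and free every channel's candev, stopping its error
 * counter poll timer and PWM output first.
 */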
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) struct kvaser_pciefd_can *can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) for (i = 0; i < pcie->nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) can = pcie->can[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (can) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) iowrite32(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) unregister_candev(can->can.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) del_timer(&can->bec_poll_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) kvaser_pciefd_pwm_stop(can);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) free_candev(can->can.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
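/* Device removal: tear down all channels, disable IRQ generation and
 * release the PCI resources.
 */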
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) static void kvaser_pciefd_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) kvaser_pciefd_remove_all_ctrls(pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) /* Turn off IRQ generation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) free_irq(pcie->pci->irq, pcie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) pci_clear_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) pci_iounmap(pdev, pcie->reg_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) static struct pci_driver kvaser_pciefd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) .name = KVASER_PCIEFD_DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) .id_table = kvaser_pciefd_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) .probe = kvaser_pciefd_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) .remove = kvaser_pciefd_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) module_pci_driver(kvaser_pciefd);