^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* Xilinx CAN device driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2012 - 2014 Xilinx, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2009 PetaLogix. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/can/dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/can/error.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/can/led.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define DRIVER_NAME "xilinx_can"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
/* CAN registers set - offsets into the memory-mapped register space.
 * The first group is common to all cores; later groups exist only on
 * the core variants noted in the comments below.
 */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
	XCAN_ESR_OFFSET		= 0x14, /* Error status */
	XCAN_SR_OFFSET		= 0x18, /* Status */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate
					  * Prescaler
					  */
	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
	XCAN_RXMSG_2_BASE_OFFSET	= 0x2100, /* RX Message Space */
	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define XCAN_CANFD_FRAME_SIZE 0x48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) XCAN_CANFD_FRAME_SIZE * (n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) XCAN_CANFD_FRAME_SIZE * (n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) XCAN_CANFD_FRAME_SIZE * (n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /* the single TX mailbox used by this driver on CAN FD HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define XCAN_TX_MAILBOX_IDX 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) #define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) /* CAN frame length constants */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #define XCAN_FRAME_MAX_DATA_LEN 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define XCANFD_DW_BYTES 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #define XCAN_TIMEOUT (1 * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) /* TX-FIFO-empty interrupt available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define XCAN_FLAG_TXFEMP 0x0001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /* RX Match Not Finished interrupt available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #define XCAN_FLAG_RXMNF 0x0002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) /* Extended acceptance filters with control at 0xE0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) #define XCAN_FLAG_EXT_FILTERS 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) /* TX mailboxes instead of TX FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #define XCAN_FLAG_TX_MAILBOXES 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) /* RX FIFO with each buffer in separate registers at 0x1100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * instead of the regular FIFO at 0x50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define XCAN_FLAG_RX_FIFO_MULTI 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) #define XCAN_FLAG_CANFD_2 0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
/* Supported Xilinx CAN IP core variants; selected from the devicetree
 * compatible string and stored in xcan_devtype_data.cantype.
 */
enum xcan_ip_type {
	XAXI_CAN = 0,	/* AXI CAN (classic CAN, soft IP) */
	XZYNQ_CANPS,	/* Zynq CANPS hard controller */
	XAXI_CANFD,	/* AXI CAN FD 1.0 */
	XAXI_CANFD_2_0,	/* AXI CAN FD 2.0 */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
/**
 * struct xcan_devtype_data - Device type specific constants
 * @cantype:		IP core variant, see enum xcan_ip_type
 * @flags:		XCAN_FLAG_* capability bits for this core
 * @bittiming_const:	Arbitration-phase bit timing limits for this core
 * @bus_clk_name:	Clock name to request for the register bus clock
 * @btr_ts2_shift:	Bit position of Time Segment 2 in the BTR register
 * @btr_sjw_shift:	Bit position of the sync jump width in the BTR register
 */
struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
/**
 * struct xcan_priv - CAN driver instance private data
 * @can:	CAN private data structure (must be first member).
 * @tx_lock:	Lock for synchronizing TX interrupt handling
 * @tx_head:	Tx CAN packets ready to send on the queue
 * @tx_tail:	Tx CAN packets successfully sent on the queue
 * @tx_max:	Maximum number of packets the driver can send at once
 * @napi:	NAPI structure
 * @read_reg:	For reading data from CAN registers
 * @write_reg:	For writing data to CAN registers
 * @dev:	Network device data structure
 * @reg_base:	Ioremapped address to registers
 * @irq_flags:	For request_irq()
 * @bus_clk:	Pointer to struct clk
 * @can_clk:	Pointer to struct clk
 * @devtype:	Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
/* CAN Bittiming constants as per Xilinx CAN specs
 * (classic CAN cores: AXI CAN and Zynq CANPS)
 */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) .tseg1_min = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) .tseg1_max = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) .tseg2_min = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) .tseg2_max = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) .sjw_max = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) .brp_min = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) .brp_max = 256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) .brp_inc = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec.
 * Note: brp_min is 2 on this core (unlike the 1.0 core).
 */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 2,
	.brp_max = 256,
	.brp_inc = 1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) .tseg1_min = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) .tseg1_max = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) .tseg2_min = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) .tseg2_max = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) .sjw_max = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) .brp_min = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) .brp_max = 256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) .brp_inc = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) * xcan_rx_int_mask - Get the mask for the receive interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * @priv: Driver private data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * Return: The receive interrupt mask used by the driver on this HW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) /* RXNEMP is better suited for our use case as it cannot be cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * while the FIFO is non-empty, but CAN FD HW does not have it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) return XCAN_IXR_RXOK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) return XCAN_IXR_RXNEMP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * set_reset_mode - Resets the CAN device mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * This is the driver reset mode routine.The driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) * enters into configuration mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static int set_reset_mode(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) timeout = jiffies + XCAN_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) if (time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) netdev_warn(ndev, "timed out for config mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) usleep_range(500, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) /* reset clears FIFOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) priv->tx_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) priv->tx_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * xcan_set_bittiming - CAN set bit timing routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * This is the driver set bittiming routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) static int xcan_set_bittiming(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) struct can_bittiming *bt = &priv->can.bittiming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) struct can_bittiming *dbt = &priv->can.data_bittiming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) u32 btr0, btr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) u32 is_config_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) /* Check whether Xilinx CAN is in configuration mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * It cannot set bit timing if Xilinx CAN is not in configuration mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) XCAN_SR_CONFIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) if (!is_config_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) netdev_alert(ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) "BUG! Cannot set bittiming - CAN is not in config mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) /* Setting Baud Rate prescalar value in BRPR Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) btr0 = (bt->brp - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) /* Setting Time Segment 1 in BTR Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) /* Setting Time Segment 2 in BTR Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) /* Setting Synchronous jump width in BTR Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (priv->devtype.cantype == XAXI_CANFD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) priv->devtype.cantype == XAXI_CANFD_2_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) /* Setting Baud Rate prescalar value in F_BRPR Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) btr0 = dbt->brp - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) /* Setting Time Segment 1 in BTR Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) /* Setting Time Segment 2 in BTR Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) /* Setting Synchronous jump width in BTR Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) priv->read_reg(priv, XCAN_BRPR_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) priv->read_reg(priv, XCAN_BTR_OFFSET));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * xcan_chip_start - This the drivers start routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * This is the drivers start routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) * Based on the State of the CAN device it puts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * the CAN device into a proper mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) static int xcan_chip_start(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) u32 reg_msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) u32 ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) /* Check if it is in reset mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) err = set_reset_mode(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) err = xcan_set_bittiming(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) /* Enable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * We enable the ERROR interrupt even with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * dedicated interrupt for a state change to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * ERROR_WARNING/ERROR_PASSIVE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) if (priv->devtype.flags & XCAN_FLAG_RXMNF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) ier |= XCAN_IXR_RXMNF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) priv->write_reg(priv, XCAN_IER_OFFSET, ier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) /* Check whether it is loopback mode or normal mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) reg_msr = XCAN_MSR_LBACK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) reg_msr = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) /* enable the first extended filter, if any, as cores with extended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * filtering default to non-receipt if all filters are disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) netdev_dbg(ndev, "status:#x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) priv->read_reg(priv, XCAN_SR_OFFSET));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) priv->can.state = CAN_STATE_ERROR_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) * xcan_do_set_mode - This sets the mode of the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * @mode: Tells the mode of the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * This check the drivers state and calls the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * the corresponding modes to set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) case CAN_MODE_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) ret = xcan_chip_start(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) netdev_err(ndev, "xcan_chip_start failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) netif_wake_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * xcan_write_frame - Write a frame to HW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * @skb: sk_buff pointer that contains data to be Txed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * @frame_offset: Register offset to write the frame to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) int frame_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) u32 id, dlc, data[2] = {0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) struct canfd_frame *cf = (struct canfd_frame *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) u32 ramoff, dwindex = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) /* Watch carefully on the bit sequence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (cf->can_id & CAN_EFF_FLAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) /* Extended CAN ID format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) XCAN_IDR_ID2_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) id |= (((cf->can_id & CAN_EFF_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) /* The substibute remote TX request bit should be "1"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) * for extended frames as in the Xilinx CAN datasheet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (cf->can_id & CAN_RTR_FLAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) /* Extended frames remote TX request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) id |= XCAN_IDR_RTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) /* Standard CAN ID format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) XCAN_IDR_ID1_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) if (cf->can_id & CAN_RTR_FLAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) /* Standard frames remote TX request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) id |= XCAN_IDR_SRR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) if (can_is_canfd_skb(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (cf->flags & CANFD_BRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) dlc |= XCAN_DLCR_BRS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) dlc |= XCAN_DLCR_EDL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) (priv->devtype.flags & XCAN_FLAG_TXFEMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) can_put_echo_skb(skb, ndev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) priv->tx_head++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) /* If the CAN frame is RTR frame this write triggers transmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * (not on CAN FD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (priv->devtype.cantype == XAXI_CANFD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) priv->devtype.cantype == XAXI_CANFD_2_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) for (i = 0; i < cf->len; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) (dwindex * XCANFD_DW_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) priv->write_reg(priv, ramoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) be32_to_cpup((__be32 *)(cf->data + i)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) dwindex++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) if (cf->len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) if (cf->len > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (!(cf->can_id & CAN_RTR_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) priv->write_reg(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) XCAN_FRAME_DW1_OFFSET(frame_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /* If the CAN frame is Standard/Extended frame this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) * write triggers transmission (not on CAN FD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) priv->write_reg(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) XCAN_FRAME_DW2_OFFSET(frame_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) data[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * @skb: sk_buff pointer that contains data to be Txed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * Return: 0 on success, -ENOSPC if FIFO is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) /* Check if the TX buffer is full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) XCAN_SR_TXFLL_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) spin_lock_irqsave(&priv->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (priv->tx_max > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /* Check if the TX buffer is full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) netif_stop_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) spin_unlock_irqrestore(&priv->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) * @skb: sk_buff pointer that contains data to be Txed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) * Return: 0 on success, -ENOSPC if there is no space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) BIT(XCAN_TX_MAILBOX_IDX)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) spin_lock_irqsave(&priv->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) xcan_write_frame(ndev, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) /* Mark buffer as ready for transmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) netif_stop_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) spin_unlock_irqrestore(&priv->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * xcan_start_xmit - Starts the transmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * @skb: sk_buff pointer that contains data to be Txed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * This function is invoked from upper layers to initiate transmission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (can_dropped_invalid_skb(ndev, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) ret = xcan_start_xmit_mailbox(skb, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) ret = xcan_start_xmit_fifo(skb, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) netdev_err(ndev, "BUG!, TX full when queue awake!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) netif_stop_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * xcan_rx - Is called from CAN isr to complete the received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * frame processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * @frame_base: Register offset to the frame to be read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * This function is invoked from the CAN isr(poll) to process the Rx frames. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * does minimal processing and invokes "netif_receive_skb" to complete further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * Return: 1 on success and 0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) static int xcan_rx(struct net_device *ndev, int frame_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) struct net_device_stats *stats = &ndev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct can_frame *cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) u32 id_xcan, dlc, data[2] = {0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) skb = alloc_can_skb(ndev, &cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (unlikely(!skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /* Read a frame from Xilinx zynq CANPS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) XCAN_DLCR_DLC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* Change Xilinx CAN data length format to socketCAN data format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) cf->can_dlc = get_can_dlc(dlc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* Change Xilinx CAN ID format to socketCAN ID format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (id_xcan & XCAN_IDR_IDE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) /* The received frame is an Extended format frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) XCAN_IDR_ID2_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) cf->can_id |= CAN_EFF_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (id_xcan & XCAN_IDR_RTR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) cf->can_id |= CAN_RTR_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* The received frame is a standard format frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) XCAN_IDR_ID1_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (id_xcan & XCAN_IDR_SRR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) cf->can_id |= CAN_RTR_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /* DW1/DW2 must always be read to remove message from RXFIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (!(cf->can_id & CAN_RTR_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* Change Xilinx CAN data format to socketCAN data format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (cf->can_dlc > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (cf->can_dlc > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) stats->rx_bytes += cf->can_dlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * xcanfd_rx - Is called from CAN isr to complete the received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * frame processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * @frame_base: Register offset to the frame to be read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * This function is invoked from the CAN isr(poll) to process the Rx frames. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * does minimal processing and invokes "netif_receive_skb" to complete further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * Return: 1 on success and 0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) static int xcanfd_rx(struct net_device *ndev, int frame_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) struct net_device_stats *stats = &ndev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct canfd_frame *cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (dlc & XCAN_DLCR_EDL_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) skb = alloc_canfd_skb(ndev, &cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (unlikely(!skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /* Change Xilinx CANFD data length format to socketCAN data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (dlc & XCAN_DLCR_EDL_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) XCAN_DLCR_DLC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) XCAN_DLCR_DLC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* Change Xilinx CAN ID format to socketCAN ID format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (id_xcan & XCAN_IDR_IDE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /* The received frame is an Extended format frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) XCAN_IDR_ID2_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) cf->can_id |= CAN_EFF_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (id_xcan & XCAN_IDR_RTR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) cf->can_id |= CAN_RTR_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* The received frame is a standard format frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) XCAN_IDR_ID1_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) XCAN_IDR_SRR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) cf->can_id |= CAN_RTR_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* Check the frame received is FD or not*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (dlc & XCAN_DLCR_EDL_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) for (i = 0; i < cf->len; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) (dwindex * XCANFD_DW_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) data[0] = priv->read_reg(priv, dw_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) dwindex++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) for (i = 0; i < cf->len; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) data[0] = priv->read_reg(priv, dw_offset + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) stats->rx_bytes += cf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * xcan_current_error_state - Get current error state from HW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Checks the current CAN error state from the HW. Note that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * only checks for ERROR_PASSIVE and ERROR_WARNING.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) static enum can_state xcan_current_error_state(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return CAN_STATE_ERROR_PASSIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) else if (status & XCAN_SR_ERRWRN_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return CAN_STATE_ERROR_WARNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return CAN_STATE_ERROR_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * xcan_set_error_state - Set new CAN error state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * @new_state: The new CAN state to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * @cf: Error frame to be populated or NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * Set new CAN error state for the device, updating statistics and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * populating the error frame if given.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static void xcan_set_error_state(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) enum can_state new_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct can_frame *cf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) u32 txerr = ecr & XCAN_ECR_TEC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) enum can_state tx_state = txerr >= rxerr ? new_state : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) enum can_state rx_state = txerr <= rxerr ? new_state : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /* non-ERROR states are handled elsewhere */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) can_change_state(ndev, cf, tx_state, rx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (cf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) cf->data[6] = txerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) cf->data[7] = rxerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * the performed RX/TX has caused it to drop to a lesser state and set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * the interface state accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) enum can_state old_state = priv->can.state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) enum can_state new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* changing error state due to successful frame RX/TX can only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * occur from these states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (old_state != CAN_STATE_ERROR_WARNING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) old_state != CAN_STATE_ERROR_PASSIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) new_state = xcan_current_error_state(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (new_state != old_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) struct can_frame *cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) skb = alloc_can_err_skb(ndev, &cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct net_device_stats *stats = &ndev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) stats->rx_bytes += cf->can_dlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * xcan_err_interrupt - error frame Isr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * @ndev: net_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * @isr: interrupt status register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * This is the CAN error interrupt and it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * check the the type of error and forward the error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * frame to upper layers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct net_device_stats *stats = &ndev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct can_frame cf = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) u32 err_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (isr & XCAN_IXR_BSOFF_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) priv->can.state = CAN_STATE_BUS_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) priv->can.can_stats.bus_off++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /* Leave device in Config Mode in bus-off state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) can_bus_off(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) cf.can_id |= CAN_ERR_BUSOFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) enum can_state new_state = xcan_current_error_state(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (new_state != priv->can.state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) xcan_set_error_state(ndev, new_state, &cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* Check for Arbitration lost interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (isr & XCAN_IXR_ARBLST_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) priv->can.can_stats.arbitration_lost++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) cf.can_id |= CAN_ERR_LOSTARB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* Check for RX FIFO Overflow interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (isr & XCAN_IXR_RXOFLW_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) stats->rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) stats->rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) cf.can_id |= CAN_ERR_CRTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /* Check for RX Match Not Finished interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (isr & XCAN_IXR_RXMNF_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) stats->rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) netdev_err(ndev, "RX match not finished, frame discarded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) cf.can_id |= CAN_ERR_CRTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /* Check for error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (isr & XCAN_IXR_ERROR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) bool berr_reporting = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) berr_reporting = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /* Check for Ack error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (err_status & XCAN_ESR_ACKER_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) stats->tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (berr_reporting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) cf.can_id |= CAN_ERR_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) cf.data[3] = CAN_ERR_PROT_LOC_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* Check for Bit error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (err_status & XCAN_ESR_BERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) stats->tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (berr_reporting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) cf.can_id |= CAN_ERR_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) cf.data[2] = CAN_ERR_PROT_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* Check for Stuff error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (err_status & XCAN_ESR_STER_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) stats->rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (berr_reporting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) cf.can_id |= CAN_ERR_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) cf.data[2] = CAN_ERR_PROT_STUFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /* Check for Form error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (err_status & XCAN_ESR_FMER_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) stats->rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (berr_reporting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) cf.can_id |= CAN_ERR_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) cf.data[2] = CAN_ERR_PROT_FORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* Check for CRC error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (err_status & XCAN_ESR_CRCER_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) stats->rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (berr_reporting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) cf.can_id |= CAN_ERR_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) priv->can.can_stats.bus_error++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (cf.can_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct can_frame *skb_cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) skb_cf->can_id |= cf.can_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) stats->rx_bytes += CAN_ERR_DLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) netdev_dbg(ndev, "%s: error status register:0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * xcan_state_interrupt - It will check the state of the CAN device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * @ndev: net_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * @isr: interrupt status register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * This will checks the state of the CAN device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * and puts the device into appropriate state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* Check for Sleep interrupt if set put CAN device in sleep state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (isr & XCAN_IXR_SLP_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) priv->can.state = CAN_STATE_SLEEPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /* Check for Wake up interrupt if set put CAN device in Active state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (isr & XCAN_IXR_WKUP_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) priv->can.state = CAN_STATE_ERROR_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * @priv: Driver private data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * Return: Register offset of the next frame in RX FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) u32 fsr, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /* clear RXOK before the is-empty check so that any newly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * received frame will reassert it without a race
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* check if RX FIFO is empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) mask = XCAN_2_FSR_FL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) mask = XCAN_FSR_FL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (!(fsr & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* check if RX FIFO is empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) XCAN_IXR_RXNEMP_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /* frames are read from a static offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) offset = XCAN_RXFIFO_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * xcan_rx_poll - Poll routine for rx packets (NAPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * @napi: napi structure pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * @quota: Max number of rx packets to be processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * This is the poll routine for rx part.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * It will process the packets maximux quota value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * Return: number of packets received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static int xcan_rx_poll(struct napi_struct *napi, int quota)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct net_device *ndev = napi->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) u32 ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) int frame_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) (work_done < quota)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) work_done += xcanfd_rx(ndev, frame_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) work_done += xcan_rx(ndev, frame_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /* increment read index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) priv->write_reg(priv, XCAN_FSR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) XCAN_FSR_IRI_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* clear rx-not-empty (will actually clear only if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) priv->write_reg(priv, XCAN_ICR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) XCAN_IXR_RXNEMP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (work_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) can_led_event(ndev, CAN_LED_EVENT_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) xcan_update_error_state_after_rxtx(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (work_done < quota) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) ier = priv->read_reg(priv, XCAN_IER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ier |= xcan_rx_int_mask(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) priv->write_reg(priv, XCAN_IER_OFFSET, ier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * xcan_tx_interrupt - Tx Done Isr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * @ndev: net_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * @isr: Interrupt status register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) struct net_device_stats *stats = &ndev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) unsigned int frames_in_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) int frames_sent = 1; /* TXOK => at least 1 frame was sent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) int retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /* Synchronize with xmit as we need to know the exact number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * of frames in the FIFO to stay in sync due to the TXFEMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * This also prevents a race between netif_wake_queue() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * netif_stop_queue().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) spin_lock_irqsave(&priv->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) frames_in_fifo = priv->tx_head - priv->tx_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (WARN_ON_ONCE(frames_in_fifo == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /* clear TXOK anyway to avoid getting back here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) spin_unlock_irqrestore(&priv->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /* Check if 2 frames were sent (TXOK only means that at least 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * frame was sent).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (frames_in_fifo > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) WARN_ON(frames_in_fifo > priv->tx_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /* Synchronize TXOK and isr so that after the loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * (1) isr variable is up-to-date at least up to TXOK clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * time. This avoids us clearing a TXOK of a second frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * but not noticing that the FIFO is now empty and thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * marking only a single frame as sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * (2) No TXOK is left. Having one could mean leaving a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * stray TXOK as we might process the associated frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * via TXFEMP handling as we read TXFEMP *after* TXOK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * clear to satisfy (1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) while ((isr & XCAN_IXR_TXOK_MASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) !WARN_ON(++retries == 100)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) priv->write_reg(priv, XCAN_ICR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) XCAN_IXR_TXOK_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (isr & XCAN_IXR_TXFEMP_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /* nothing in FIFO anymore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) frames_sent = frames_in_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /* single frame in fifo, just clear TXOK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) while (frames_sent--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) priv->tx_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) priv->tx_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) stats->tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) netif_wake_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) spin_unlock_irqrestore(&priv->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) can_led_event(ndev, CAN_LED_EVENT_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) xcan_update_error_state_after_rxtx(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * xcan_interrupt - CAN Isr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * @irq: irq number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * @dev_id: device id pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * This is the xilinx CAN Isr. It checks for the type of interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * and invokes the corresponding ISR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static irqreturn_t xcan_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct net_device *ndev = (struct net_device *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) u32 isr, ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) u32 isr_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) u32 rx_int_mask = xcan_rx_int_mask(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /* Get the interrupt status from Xilinx CAN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (!isr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /* Check for the type of interrupt and Processing it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) XCAN_IXR_WKUP_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) xcan_state_interrupt(ndev, isr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /* Check for Tx interrupt and Processing it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (isr & XCAN_IXR_TXOK_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) xcan_tx_interrupt(ndev, isr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /* Check for the type of error interrupt and Processing it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) XCAN_IXR_RXMNF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (isr_errors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) xcan_err_interrupt(ndev, isr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* Check for the type of receive interrupt and Processing it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (isr & rx_int_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) ier = priv->read_reg(priv, XCAN_IER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) ier &= ~rx_int_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) priv->write_reg(priv, XCAN_IER_OFFSET, ier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) napi_schedule(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * xcan_chip_stop - Driver stop routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * This is the drivers stop routine. It will disable the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * interrupts and put the device into configuration mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static void xcan_chip_stop(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /* Disable interrupts and leave the can in configuration mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) ret = set_reset_mode(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) netdev_dbg(ndev, "set_reset_mode() Failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) priv->can.state = CAN_STATE_STOPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * xcan_open - Driver open routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * This is the driver open routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static int xcan_open(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) ret = pm_runtime_get_sync(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ndev->name, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) netdev_err(ndev, "irq allocation for CAN failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /* Set chip into reset mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) ret = set_reset_mode(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) netdev_err(ndev, "mode resetting failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) goto err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* Common open */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) ret = open_candev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) goto err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ret = xcan_chip_start(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) netdev_err(ndev, "xcan_chip_start failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) goto err_candev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) can_led_event(ndev, CAN_LED_EVENT_OPEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) napi_enable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) netif_start_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) err_candev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) close_candev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) err_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) free_irq(ndev->irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) pm_runtime_put(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * xcan_close - Driver close routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * Return: 0 always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static int xcan_close(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) netif_stop_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) xcan_chip_stop(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) free_irq(ndev->irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) close_candev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) can_led_event(ndev, CAN_LED_EVENT_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) pm_runtime_put(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * xcan_get_berr_counter - error counter routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * @ndev: Pointer to net_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * @bec: Pointer to can_berr_counter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * This is the driver error counter routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static int xcan_get_berr_counter(const struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) struct can_berr_counter *bec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ret = pm_runtime_get_sync(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) pm_runtime_put(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) pm_runtime_put(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static const struct net_device_ops xcan_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) .ndo_open = xcan_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) .ndo_stop = xcan_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) .ndo_start_xmit = xcan_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) .ndo_change_mtu = can_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * xcan_suspend - Suspend method for the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * @dev: Address of the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * Put the driver into low power mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) static int __maybe_unused xcan_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) struct net_device *ndev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) netif_stop_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) netif_device_detach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) xcan_chip_stop(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return pm_runtime_force_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * xcan_resume - Resume from suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * @dev: Address of the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * Resume operation after suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static int __maybe_unused xcan_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct net_device *ndev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) ret = pm_runtime_force_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) dev_err(dev, "pm_runtime_force_resume failed on resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) ret = xcan_chip_start(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) dev_err(dev, "xcan_chip_start failed on resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) netif_device_attach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) netif_start_queue(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * xcan_runtime_suspend - Runtime suspend method for the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * @dev: Address of the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * Put the driver into low power mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * Return: 0 always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) static int __maybe_unused xcan_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) struct net_device *ndev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) clk_disable_unprepare(priv->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) clk_disable_unprepare(priv->can_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * xcan_runtime_resume - Runtime resume from suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * @dev: Address of the device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * Resume operation after suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static int __maybe_unused xcan_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct net_device *ndev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) ret = clk_prepare_enable(priv->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) dev_err(dev, "Cannot enable clock.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ret = clk_prepare_enable(priv->can_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) dev_err(dev, "Cannot enable clock.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) clk_disable_unprepare(priv->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static const struct dev_pm_ops xcan_dev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) static const struct xcan_devtype_data xcan_zynq_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) .cantype = XZYNQ_CANPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) .flags = XCAN_FLAG_TXFEMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) .bittiming_const = &xcan_bittiming_const,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) .bus_clk_name = "pclk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) static const struct xcan_devtype_data xcan_axi_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) .cantype = XAXI_CAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) .bittiming_const = &xcan_bittiming_const,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) .bus_clk_name = "s_axi_aclk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) static const struct xcan_devtype_data xcan_canfd_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) .cantype = XAXI_CANFD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) .flags = XCAN_FLAG_EXT_FILTERS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) XCAN_FLAG_RXMNF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) XCAN_FLAG_TX_MAILBOXES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) XCAN_FLAG_RX_FIFO_MULTI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) .bittiming_const = &xcan_bittiming_const_canfd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) .bus_clk_name = "s_axi_aclk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static const struct xcan_devtype_data xcan_canfd2_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) .cantype = XAXI_CANFD_2_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) .flags = XCAN_FLAG_EXT_FILTERS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) XCAN_FLAG_RXMNF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) XCAN_FLAG_TX_MAILBOXES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) XCAN_FLAG_CANFD_2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) XCAN_FLAG_RX_FIFO_MULTI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) .bittiming_const = &xcan_bittiming_const_canfd2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) .bus_clk_name = "s_axi_aclk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /* Match table for OF platform binding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static const struct of_device_id xcan_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) { .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) { /* end of list */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) MODULE_DEVICE_TABLE(of, xcan_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * xcan_probe - Platform registration call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * @pdev: Handle to the platform device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * This function does all the memory allocation and registration for the CAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * Return: 0 on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static int xcan_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct xcan_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) const struct of_device_id *of_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) const struct xcan_devtype_data *devtype = &xcan_axi_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) int rx_max, tx_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) u32 hw_tx_max = 0, hw_rx_max = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) const char *hw_tx_max_property;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) /* Get the virtual base address for the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) addr = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (IS_ERR(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) ret = PTR_ERR(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) of_id = of_match_device(xcan_of_match, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (of_id && of_id->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) devtype = of_id->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) "tx-mailbox-count" : "tx-fifo-depth";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) &hw_tx_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) dev_err(&pdev->dev, "missing %s property\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) hw_tx_max_property);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) &hw_rx_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) "missing rx-fifo-depth property (mailbox mode is not supported)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) /* With TX FIFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * There is no way to directly figure out how many frames have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) * sent when the TXOK interrupt is processed. If TXFEMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) * is supported, we can have 2 frames in the FIFO and use TXFEMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) * to determine if 1 or 2 frames have been sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) * Theoretically we should be able to use TXFWMEMP to determine up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * to 3 frames, but it seems that after putting a second frame in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * sent), which is not a sensible state - possibly TXFWMEMP is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * completely synchronized with the rest of the bits?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * With TX mailboxes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * HW sends frames in CAN ID priority order. To preserve FIFO ordering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * we submit frames one at a time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) (devtype->flags & XCAN_FLAG_TXFEMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) tx_max = min(hw_tx_max, 2U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) tx_max = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) rx_max = hw_rx_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) /* Create a CAN device instance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) priv->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) priv->can.bittiming_const = devtype->bittiming_const;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) priv->can.do_set_mode = xcan_do_set_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) priv->can.do_get_berr_counter = xcan_get_berr_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) CAN_CTRLMODE_BERR_REPORTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (devtype->cantype == XAXI_CANFD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) priv->can.data_bittiming_const =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) &xcan_data_bittiming_const_canfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (devtype->cantype == XAXI_CANFD_2_0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) priv->can.data_bittiming_const =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) &xcan_data_bittiming_const_canfd2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (devtype->cantype == XAXI_CANFD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) devtype->cantype == XAXI_CANFD_2_0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) priv->reg_base = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) priv->tx_max = tx_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) priv->devtype = *devtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) spin_lock_init(&priv->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) /* Get IRQ for the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) ret = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) ndev->irq = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) ndev->flags |= IFF_ECHO; /* We support local echo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) platform_set_drvdata(pdev, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) SET_NETDEV_DEV(ndev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) ndev->netdev_ops = &xcan_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /* Getting the CAN can_clk info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (IS_ERR(priv->can_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) dev_err(&pdev->dev, "Device clock not found.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) ret = PTR_ERR(priv->can_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (IS_ERR(priv->bus_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) dev_err(&pdev->dev, "bus clock not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) ret = PTR_ERR(priv->bus_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) priv->write_reg = xcan_write_reg_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) priv->read_reg = xcan_read_reg_le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) ret = pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) goto err_disableclks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) priv->write_reg = xcan_write_reg_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) priv->read_reg = xcan_read_reg_be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) priv->can.clock.freq = clk_get_rate(priv->can_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) ret = register_candev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) goto err_disableclks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) devm_can_led_init(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) pm_runtime_put(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) priv->reg_base, ndev->irq, priv->can.clock.freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) hw_tx_max, priv->tx_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) err_disableclks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) pm_runtime_put(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) free_candev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * xcan_remove - Unregister the device after releasing the resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * @pdev: Handle to the platform device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * This function frees all the resources allocated to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * Return: 0 always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) static int xcan_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) struct net_device *ndev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) struct xcan_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) unregister_candev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) free_candev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) static struct platform_driver xcan_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) .probe = xcan_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) .remove = xcan_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) .pm = &xcan_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) .of_match_table = xcan_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) module_platform_driver(xcan_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) MODULE_AUTHOR("Xilinx Inc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) MODULE_DESCRIPTION("Xilinx CAN interface");