// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Socket CAN driver for Aeroflex Gaisler GRCAN and GRHCAN.
 *
 * 2012 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRCAN and GRHCAN CAN controllers available in the GRLIB
 * VHDL IP core library.
 *
 * Full documentation of the GRCAN core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * See "Documentation/devicetree/bindings/net/can/grcan.txt" for information on
 * open firmware properties.
 *
 * See "Documentation/ABI/testing/sysfs-class-net-grcan" for information on the
 * sysfs interface.
 *
 * See "Documentation/admin-guide/kernel-parameters.rst" for information on
 * the module parameters.
 *
 * Contributors: Andreas Larsson <andreas@gaisler.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/spinlock.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include <linux/dma-mapping.h>

#define DRV_NAME	"grcan"

#define GRCAN_NAPI_WEIGHT	32

#define GRCAN_RESERVE_SIZE(slot1, slot2)	(((slot2) - (slot1)) / 4 - 1)
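/* For example, GRCAN_RESERVE_SIZE(0x08, 0x18) evaluates to 3: three 32-bit
 * padding words covering the unused offsets 0x0c, 0x10 and 0x14 between the
 * register at 0x08 and the next implemented register at 0x18.
 */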

struct grcan_registers {
	u32 conf;	/* 0x00 */
	u32 stat;	/* 0x04 */
	u32 ctrl;	/* 0x08 */
	u32 __reserved1[GRCAN_RESERVE_SIZE(0x08, 0x18)];
	u32 smask;	/* 0x18 - CanMASK */
	u32 scode;	/* 0x1c - CanCODE */
	u32 __reserved2[GRCAN_RESERVE_SIZE(0x1c, 0x100)];
	u32 pimsr;	/* 0x100 */
	u32 pimr;	/* 0x104 */
	u32 pisr;	/* 0x108 */
	u32 pir;	/* 0x10C */
	u32 imr;	/* 0x110 */
	u32 picr;	/* 0x114 */
	u32 __reserved3[GRCAN_RESERVE_SIZE(0x114, 0x200)];
	u32 txctrl;	/* 0x200 */
	u32 txaddr;	/* 0x204 */
	u32 txsize;	/* 0x208 */
	u32 txwr;	/* 0x20C */
	u32 txrd;	/* 0x210 */
	u32 txirq;	/* 0x214 */
	u32 __reserved4[GRCAN_RESERVE_SIZE(0x214, 0x300)];
	u32 rxctrl;	/* 0x300 */
	u32 rxaddr;	/* 0x304 */
	u32 rxsize;	/* 0x308 */
	u32 rxwr;	/* 0x30C */
	u32 rxrd;	/* 0x310 */
	u32 rxirq;	/* 0x314 */
	u32 rxmask;	/* 0x318 */
	u32 rxcode;	/* 0x31C */
};

#define GRCAN_CONF_ABORT	0x00000001
#define GRCAN_CONF_ENABLE0	0x00000002
#define GRCAN_CONF_ENABLE1	0x00000004
#define GRCAN_CONF_SELECT	0x00000008
#define GRCAN_CONF_SILENT	0x00000010
#define GRCAN_CONF_SAM		0x00000020 /* Available in some hardware */
#define GRCAN_CONF_BPR		0x00000300 /* Note: not BRP */
#define GRCAN_CONF_RSJ		0x00007000
#define GRCAN_CONF_PS1		0x00f00000
#define GRCAN_CONF_PS2		0x000f0000
#define GRCAN_CONF_SCALER	0xff000000
#define GRCAN_CONF_OPERATION						\
	(GRCAN_CONF_ABORT | GRCAN_CONF_ENABLE0 | GRCAN_CONF_ENABLE1	\
	 | GRCAN_CONF_SELECT | GRCAN_CONF_SILENT | GRCAN_CONF_SAM)
#define GRCAN_CONF_TIMING						\
	(GRCAN_CONF_BPR | GRCAN_CONF_RSJ | GRCAN_CONF_PS1		\
	 | GRCAN_CONF_PS2 | GRCAN_CONF_SCALER)

#define GRCAN_CONF_RSJ_MIN	1
#define GRCAN_CONF_RSJ_MAX	4
#define GRCAN_CONF_PS1_MIN	1
#define GRCAN_CONF_PS1_MAX	15
#define GRCAN_CONF_PS2_MIN	2
#define GRCAN_CONF_PS2_MAX	8
#define GRCAN_CONF_SCALER_MIN	0
#define GRCAN_CONF_SCALER_MAX	255
#define GRCAN_CONF_SCALER_INC	1
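/* As used by this driver, the SCALER field is programmed as the CAN bit-rate
 * prescaler (brp) minus one; see the brp_min/brp_max mapping in
 * grcan_bittiming_const and the "scaler = bt->brp - 1" assignment in
 * grcan_set_bittiming below.
 */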

#define GRCAN_CONF_BPR_BIT	8
#define GRCAN_CONF_RSJ_BIT	12
#define GRCAN_CONF_PS1_BIT	20
#define GRCAN_CONF_PS2_BIT	16
#define GRCAN_CONF_SCALER_BIT	24

#define GRCAN_STAT_PASS		0x000001
#define GRCAN_STAT_OFF		0x000002
#define GRCAN_STAT_OR		0x000004
#define GRCAN_STAT_AHBERR	0x000008
#define GRCAN_STAT_ACTIVE	0x000010
#define GRCAN_STAT_RXERRCNT	0x00ff00
#define GRCAN_STAT_TXERRCNT	0xff0000

#define GRCAN_STAT_ERRCTR_RELATED	(GRCAN_STAT_PASS | GRCAN_STAT_OFF)

#define GRCAN_STAT_RXERRCNT_BIT	8
#define GRCAN_STAT_TXERRCNT_BIT	16

#define GRCAN_STAT_ERRCNT_WARNING_LIMIT	96
#define GRCAN_STAT_ERRCNT_PASSIVE_LIMIT	127
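/* Thresholds against which the rx/tx error counters are compared when
 * deciding error-warning and error-passive conditions in grcan_err() below.
 */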

#define GRCAN_CTRL_RESET	0x2
#define GRCAN_CTRL_ENABLE	0x1

#define GRCAN_TXCTRL_ENABLE	0x1
#define GRCAN_TXCTRL_ONGOING	0x2
#define GRCAN_TXCTRL_SINGLE	0x4

#define GRCAN_RXCTRL_ENABLE	0x1
#define GRCAN_RXCTRL_ONGOING	0x2

/* Relative offset of IRQ sources to AMBA Plug&Play */
#define GRCAN_IRQIX_IRQ		0
#define GRCAN_IRQIX_TXSYNC	1
#define GRCAN_IRQIX_RXSYNC	2

#define GRCAN_IRQ_PASS		0x00001
#define GRCAN_IRQ_OFF		0x00002
#define GRCAN_IRQ_OR		0x00004
#define GRCAN_IRQ_RXAHBERR	0x00008
#define GRCAN_IRQ_TXAHBERR	0x00010
#define GRCAN_IRQ_RXIRQ		0x00020
#define GRCAN_IRQ_TXIRQ		0x00040
#define GRCAN_IRQ_RXFULL	0x00080
#define GRCAN_IRQ_TXEMPTY	0x00100
#define GRCAN_IRQ_RX		0x00200
#define GRCAN_IRQ_TX		0x00400
#define GRCAN_IRQ_RXSYNC	0x00800
#define GRCAN_IRQ_TXSYNC	0x01000
#define GRCAN_IRQ_RXERRCTR	0x02000
#define GRCAN_IRQ_TXERRCTR	0x04000
#define GRCAN_IRQ_RXMISS	0x08000
#define GRCAN_IRQ_TXLOSS	0x10000

#define GRCAN_IRQ_NONE	0
#define GRCAN_IRQ_ALL							\
	(GRCAN_IRQ_PASS | GRCAN_IRQ_OFF | GRCAN_IRQ_OR			\
	 | GRCAN_IRQ_RXAHBERR | GRCAN_IRQ_TXAHBERR			\
	 | GRCAN_IRQ_RXIRQ | GRCAN_IRQ_TXIRQ				\
	 | GRCAN_IRQ_RXFULL | GRCAN_IRQ_TXEMPTY				\
	 | GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_RXSYNC		\
	 | GRCAN_IRQ_TXSYNC | GRCAN_IRQ_RXERRCTR			\
	 | GRCAN_IRQ_TXERRCTR | GRCAN_IRQ_RXMISS			\
	 | GRCAN_IRQ_TXLOSS)

#define GRCAN_IRQ_ERRCTR_RELATED	(GRCAN_IRQ_RXERRCTR | GRCAN_IRQ_TXERRCTR \
					 | GRCAN_IRQ_PASS | GRCAN_IRQ_OFF)
#define GRCAN_IRQ_ERRORS		(GRCAN_IRQ_ERRCTR_RELATED | GRCAN_IRQ_OR \
					 | GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR \
					 | GRCAN_IRQ_TXLOSS)
#define GRCAN_IRQ_DEFAULT		(GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_ERRORS)

#define GRCAN_MSG_SIZE		16

#define GRCAN_MSG_IDE		0x80000000
#define GRCAN_MSG_RTR		0x40000000
#define GRCAN_MSG_BID		0x1ffc0000
#define GRCAN_MSG_EID		0x1fffffff
#define GRCAN_MSG_IDE_BIT	31
#define GRCAN_MSG_RTR_BIT	30
#define GRCAN_MSG_BID_BIT	18
#define GRCAN_MSG_EID_BIT	0
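/* BID holds the 11-bit base (standard) identifier in bits 28..18, while EID
 * holds the full 29-bit extended identifier in bits 28..0, as selected by the
 * IDE flag.
 */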

#define GRCAN_MSG_DLC		0xf0000000
#define GRCAN_MSG_TXERRC	0x00ff0000
#define GRCAN_MSG_RXERRC	0x0000ff00
#define GRCAN_MSG_DLC_BIT	28
#define GRCAN_MSG_TXERRC_BIT	16
#define GRCAN_MSG_RXERRC_BIT	8
#define GRCAN_MSG_AHBERR	0x00000008
#define GRCAN_MSG_OR		0x00000004
#define GRCAN_MSG_OFF		0x00000002
#define GRCAN_MSG_PASS		0x00000001

#define GRCAN_MSG_DATA_SLOT_INDEX(i)	(2 + (i) / 4)
#define GRCAN_MSG_DATA_SHIFT(i)		((3 - (i) % 4) * 8)
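/* Data byte i of a frame lives in 32-bit word 2 or 3 of the 16-byte message
 * slot, most significant byte first: e.g. byte 0 sits in word 2 at shift 24
 * and byte 5 sits in word 3 at shift 16.
 */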

#define GRCAN_BUFFER_ALIGNMENT		1024
#define GRCAN_DEFAULT_BUFFER_SIZE	1024
#define GRCAN_VALID_TR_SIZE_MASK	0x001fffc0

#define GRCAN_INVALID_BUFFER_SIZE(s) \
	((s) == 0 || ((s) & ~GRCAN_VALID_TR_SIZE_MASK))
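/* Given the mask above, a valid tx/rx buffer size is a non-zero multiple of
 * 64 bytes no larger than 0x1fffc0.
 */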

#if GRCAN_INVALID_BUFFER_SIZE(GRCAN_DEFAULT_BUFFER_SIZE)
#error "Invalid default buffer size"
#endif

struct grcan_dma_buffer {
	size_t size;
	void *buf;
	dma_addr_t handle;
};

struct grcan_dma {
	size_t base_size;
	void *base_buf;
	dma_addr_t base_handle;
	struct grcan_dma_buffer tx;
	struct grcan_dma_buffer rx;
};

/* GRCAN configuration parameters */
struct grcan_device_config {
	unsigned short enable0;
	unsigned short enable1;
	unsigned short select;
	unsigned int txsize;
	unsigned int rxsize;
};

#define GRCAN_DEFAULT_DEVICE_CONFIG {				\
		.enable0	= 0,				\
		.enable1	= 0,				\
		.select		= 0,				\
		.txsize		= GRCAN_DEFAULT_BUFFER_SIZE,	\
		.rxsize		= GRCAN_DEFAULT_BUFFER_SIZE,	\
	}

#define GRCAN_TXBUG_SAFE_GRLIB_VERSION	0x4100
#define GRLIB_VERSION_MASK		0xffff
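/* Presumably, GRLIB build versions at or above GRCAN_TXBUG_SAFE_GRLIB_VERSION
 * do not need the tx bug workaround described at need_txbug_workaround below;
 * GRLIB_VERSION_MASK extracts the 16-bit version field for that comparison.
 */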

/* GRCAN private data structure */
struct grcan_priv {
	struct can_priv can;	/* must be the first member */
	struct net_device *dev;
	struct napi_struct napi;

	struct grcan_registers __iomem *regs;	/* ioremap'ed registers */
	struct grcan_device_config config;
	struct grcan_dma dma;

	struct sk_buff **echo_skb;	/* We allocate this on our own */
	u8 *txdlc;			/* Length of queued frames */

	/* The echo skb pointer, pointing into echo_skb and indicating which
	 * frames can be echoed back. See the "Notes on the tx cyclic buffer
	 * handling"-comment for grcan_start_xmit for more details.
	 */
	u32 eskbp;

	/* Lock for controlling changes to the netif tx queue state, accesses to
	 * the echo_skb pointer eskbp and for making sure that a running reset
	 * and/or a close of the interface is done without interference from
	 * other parts of the code.
	 *
	 * The echo_skb pointer, eskbp, should only be accessed under this lock
	 * as it can be changed in several places and together with decisions on
	 * whether to wake up the tx queue.
	 *
	 * The tx queue must never be woken up if there is a running reset or
	 * close in progress.
	 *
	 * A running reset (see below on need_txbug_workaround) should never be
	 * done if the interface is closing down and several running resets
	 * should never be scheduled simultaneously.
	 */
	spinlock_t lock;

	/* Whether a workaround is needed due to a bug in older hardware. In
	 * this case, the driver both tries to prevent the bug from being
	 * triggered and recovers, if the bug nevertheless happens, by doing a
	 * running reset. A running reset resets the device and continues from
	 * where it was, without being noticeable from outside the driver
	 * (apart from slight delays).
	 */
	bool need_txbug_workaround;

	/* To trigger initialization of a running reset and to trigger the
	 * running reset itself, respectively, in the case of a device that
	 * has hung due to the tx bug.
	 */
	struct timer_list hang_timer;
	struct timer_list rr_timer;

	/* To avoid waking up the netif queue and restarting timers
	 * when a reset is scheduled or a close of the device is in
	 * progress.
	 */
	bool resetting;
	bool closing;
};

/* Wait time for a short wait for ongoing to clear */
#define GRCAN_SHORTWAIT_USECS	10

/* Limit on the number of transmitted bits of an eff frame according to the CAN
 * specification: 1 bit start of frame, 32 bits arbitration field, 6 bits
 * control field, 8 bytes data field, 16 bits crc field, 2 bits ACK field and 7
 * bits end of frame
 */
#define GRCAN_EFF_FRAME_MAX_BITS	(1+32+6+8*8+16+2+7)
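/* 1 + 32 + 6 + 64 + 16 + 2 + 7 = 128 bits; note that bit stuffing on the wire
 * can add further bits beyond this sum.
 */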

#if defined(__BIG_ENDIAN)
static inline u32 grcan_read_reg(u32 __iomem *reg)
{
	return ioread32be(reg);
}

static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
{
	iowrite32be(val, reg);
}
#else
static inline u32 grcan_read_reg(u32 __iomem *reg)
{
	return ioread32(reg);
}

static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
{
	iowrite32(val, reg);
}
#endif

static inline void grcan_clear_bits(u32 __iomem *reg, u32 mask)
{
	grcan_write_reg(reg, grcan_read_reg(reg) & ~mask);
}

static inline void grcan_set_bits(u32 __iomem *reg, u32 mask)
{
	grcan_write_reg(reg, grcan_read_reg(reg) | mask);
}

static inline u32 grcan_read_bits(u32 __iomem *reg, u32 mask)
{
	return grcan_read_reg(reg) & mask;
}

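/* Read-modify-write: update only the bits selected by mask, leaving the
 * remaining bits of the register unchanged.
 */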
static inline void grcan_write_bits(u32 __iomem *reg, u32 value, u32 mask)
{
	u32 old = grcan_read_reg(reg);

	grcan_write_reg(reg, (old & ~mask) | (value & mask));
}

/* a and b should both be in [0,size] and a == b == size should not hold */
static inline u32 grcan_ring_add(u32 a, u32 b, u32 size)
{
	u32 sum = a + b;

	if (sum < size)
		return sum;
	else
		return sum - size;
}

/* a and b should both be in [0,size) */
static inline u32 grcan_ring_sub(u32 a, u32 b, u32 size)
{
	return grcan_ring_add(a, size - b, size);
}

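/* Note: the tx ring always keeps one message slot unused so that a full ring
 * (txwr one slot behind eskbp) can be told apart from an empty one
 * (txwr == eskbp); hence the "- 1" in the slot count below.
 */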
/* Available slots for new transmissions */
static inline u32 grcan_txspace(size_t txsize, u32 txwr, u32 eskbp)
{
	u32 slots = txsize / GRCAN_MSG_SIZE - 1;
	u32 used = grcan_ring_sub(txwr, eskbp, txsize) / GRCAN_MSG_SIZE;

	return slots - used;
}

/* Configuration parameters that can be set via module parameters */
static struct grcan_device_config grcan_module_config =
	GRCAN_DEFAULT_DEVICE_CONFIG;

static const struct can_bittiming_const grcan_bittiming_const = {
	.name		= DRV_NAME,
	.tseg1_min	= GRCAN_CONF_PS1_MIN + 1,
	.tseg1_max	= GRCAN_CONF_PS1_MAX + 1,
	.tseg2_min	= GRCAN_CONF_PS2_MIN,
	.tseg2_max	= GRCAN_CONF_PS2_MAX,
	.sjw_max	= GRCAN_CONF_RSJ_MAX,
	.brp_min	= GRCAN_CONF_SCALER_MIN + 1,
	.brp_max	= GRCAN_CONF_SCALER_MAX + 1,
	.brp_inc	= GRCAN_CONF_SCALER_INC,
};

static int grcan_set_bittiming(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct can_bittiming *bt = &priv->can.bittiming;
	u32 timing = 0;
	int bpr, rsj, ps1, ps2, scaler;

	/* Should never happen - function will not be called when
	 * device is up
	 */
	if (grcan_read_bits(&regs->ctrl, GRCAN_CTRL_ENABLE))
		return -EBUSY;

	bpr = 0; /* Note bpr and brp are different concepts */
	rsj = bt->sjw;
	ps1 = (bt->prop_seg + bt->phase_seg1) - 1; /* tseg1 - 1 */
	ps2 = bt->phase_seg2;
	scaler = (bt->brp - 1);
	netdev_dbg(dev, "Request for BPR=%d, RSJ=%d, PS1=%d, PS2=%d, SCALER=%d",
		   bpr, rsj, ps1, ps2, scaler);
	if (!(ps1 > ps2)) {
		netdev_err(dev, "PS1 > PS2 must hold: PS1=%d, PS2=%d\n",
			   ps1, ps2);
		return -EINVAL;
	}
	if (!(ps2 >= rsj)) {
		netdev_err(dev, "PS2 >= RSJ must hold: PS2=%d, RSJ=%d\n",
			   ps2, rsj);
		return -EINVAL;
	}

	timing |= (bpr << GRCAN_CONF_BPR_BIT) & GRCAN_CONF_BPR;
	timing |= (rsj << GRCAN_CONF_RSJ_BIT) & GRCAN_CONF_RSJ;
	timing |= (ps1 << GRCAN_CONF_PS1_BIT) & GRCAN_CONF_PS1;
	timing |= (ps2 << GRCAN_CONF_PS2_BIT) & GRCAN_CONF_PS2;
	timing |= (scaler << GRCAN_CONF_SCALER_BIT) & GRCAN_CONF_SCALER;
	netdev_info(dev, "setting timing=0x%x\n", timing);
	grcan_write_bits(&regs->conf, timing, GRCAN_CONF_TIMING);

	return 0;
}

static int grcan_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	u32 status = grcan_read_reg(&regs->stat);

	bec->txerr = (status & GRCAN_STAT_TXERRCNT) >> GRCAN_STAT_TXERRCNT_BIT;
	bec->rxerr = (status & GRCAN_STAT_RXERRCNT) >> GRCAN_STAT_RXERRCNT_BIT;
	return 0;
}

static int grcan_poll(struct napi_struct *napi, int budget);

/* Reset device, but keep configuration information */
static void grcan_reset(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	u32 config = grcan_read_reg(&regs->conf);

	grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
	grcan_write_reg(&regs->conf, config);

	priv->eskbp = grcan_read_reg(&regs->txrd);
	priv->can.state = CAN_STATE_STOPPED;

	/* Turn off hardware filtering - regs->rxcode set to 0 by reset */
	grcan_write_reg(&regs->rxmask, 0);
}

/* stop device without changing any configurations */
static void grcan_stop_hardware(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;

	grcan_write_reg(&regs->imr, GRCAN_IRQ_NONE);
	grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
	grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
	grcan_clear_bits(&regs->ctrl, GRCAN_CTRL_ENABLE);
}

/* Let priv->eskbp catch up to regs->txrd and echo back the skbs if echo
 * is true and free them otherwise.
 *
 * If budget is >= 0, stop after handling at most budget skbs. Otherwise,
 * continue until priv->eskbp catches up to regs->txrd.
 *
 * priv->lock *must* be held when calling this function
 */
static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	struct net_device_stats *stats = &dev->stats;
	int i, work_done;

	/* Updates to priv->eskbp and wake-ups of the queue need to
	 * be atomic with respect to the reads of priv->eskbp and
	 * shut-downs of the queue in grcan_start_xmit.
	 */
	u32 txrd = grcan_read_reg(&regs->txrd);

	for (work_done = 0; work_done < budget || budget < 0; work_done++) {
		if (priv->eskbp == txrd)
			break;
		i = priv->eskbp / GRCAN_MSG_SIZE;
		if (echo) {
			/* Normal echo of messages */
			stats->tx_packets++;
			stats->tx_bytes += priv->txdlc[i];
			priv->txdlc[i] = 0;
			can_get_echo_skb(dev, i);
		} else {
			/* For cleanup of untransmitted messages */
			can_free_echo_skb(dev, i);
		}

		priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE,
					     dma->tx.size);
		txrd = grcan_read_reg(&regs->txrd);
	}
	return work_done;
}

static void grcan_lost_one_shot_frame(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	u32 txrd;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	catch_up_echo_skb(dev, -1, true);

	if (unlikely(grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE))) {
		/* Should never happen */
		netdev_err(dev, "TXCTRL enabled at TXLOSS in one shot mode\n");
	} else {
		/* By the time a GRCAN_IRQ_TXLOSS is generated in
		 * one-shot mode there is no problem in writing
		 * to TXRD, even in versions of the hardware in
		 * which GRCAN_TXCTRL_ONGOING is not cleared properly
		 * in one-shot mode.
		 */

		/* Skip message and discard echo-skb */
		txrd = grcan_read_reg(&regs->txrd);
		txrd = grcan_ring_add(txrd, GRCAN_MSG_SIZE, dma->tx.size);
		grcan_write_reg(&regs->txrd, txrd);
		catch_up_echo_skb(dev, -1, false);

		if (!priv->resetting && !priv->closing &&
		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) {
			netif_wake_queue(dev);
			grcan_set_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void grcan_err(struct net_device *dev, u32 sources, u32 status)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	struct net_device_stats *stats = &dev->stats;
	struct can_frame cf;

	/* Zero potential error_frame */
	memset(&cf, 0, sizeof(cf));

	/* Message lost interrupt. This might be due to arbitration error, but
	 * is also triggered when there is no one else on the can bus or when
	 * there is a problem with the hardware interface or the bus itself. As
	 * arbitration errors can not be singled out, no error frames are
	 * generated reporting this event as an arbitration error.
	 */
	if (sources & GRCAN_IRQ_TXLOSS) {
		/* Take care of failed one-shot transmit */
		if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
			grcan_lost_one_shot_frame(dev);

		/* Stop printing as soon as error passive or bus off is in
		 * effect to limit the amount of txloss debug printouts.
		 */
		if (!(status & GRCAN_STAT_ERRCTR_RELATED)) {
			netdev_dbg(dev, "tx message lost\n");
			stats->tx_errors++;
		}
	}

	/* Conditions dealing with the error counters. There is no interrupt for
	 * error warning, but there are interrupts for increases of the error
	 * counters.
	 */
	if ((sources & GRCAN_IRQ_ERRCTR_RELATED) ||
	    (status & GRCAN_STAT_ERRCTR_RELATED)) {
		enum can_state state = priv->can.state;
		enum can_state oldstate = state;
		u32 txerr = (status & GRCAN_STAT_TXERRCNT)
			>> GRCAN_STAT_TXERRCNT_BIT;
		u32 rxerr = (status & GRCAN_STAT_RXERRCNT)
			>> GRCAN_STAT_RXERRCNT_BIT;

		/* Figure out current state */
		if (status & GRCAN_STAT_OFF) {
			state = CAN_STATE_BUS_OFF;
		} else if (status & GRCAN_STAT_PASS) {
			state = CAN_STATE_ERROR_PASSIVE;
		} else if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT ||
			   rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT) {
			state = CAN_STATE_ERROR_WARNING;
		} else {
			state = CAN_STATE_ERROR_ACTIVE;
		}

		/* Handle and report state changes */
		if (state != oldstate) {
			switch (state) {
			case CAN_STATE_BUS_OFF:
				netdev_dbg(dev, "bus-off\n");
				netif_carrier_off(dev);
				priv->can.can_stats.bus_off++;

				/* Prevent the hardware from recovering from bus
				 * off on its own if restart is disabled.
				 */
				if (!priv->can.restart_ms)
					grcan_stop_hardware(dev);

				cf.can_id |= CAN_ERR_BUSOFF;
				break;

			case CAN_STATE_ERROR_PASSIVE:
				netdev_dbg(dev, "Error passive condition\n");
				priv->can.can_stats.error_passive++;

				cf.can_id |= CAN_ERR_CRTL;
				if (txerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
					cf.data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
				if (rxerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
					cf.data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
				break;

			case CAN_STATE_ERROR_WARNING:
				netdev_dbg(dev, "Error warning condition\n");
				priv->can.can_stats.error_warning++;

				cf.can_id |= CAN_ERR_CRTL;
				if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
					cf.data[1] |= CAN_ERR_CRTL_TX_WARNING;
				if (rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
					cf.data[1] |= CAN_ERR_CRTL_RX_WARNING;
				break;

			case CAN_STATE_ERROR_ACTIVE:
				netdev_dbg(dev, "Error active condition\n");
				cf.can_id |= CAN_ERR_CRTL;
				break;

			default:
				/* There are no others at this point */
				break;
			}
			cf.data[6] = txerr;
			cf.data[7] = rxerr;
			priv->can.state = state;
		}

		/* Report automatic restarts */
		if (priv->can.restart_ms && oldstate == CAN_STATE_BUS_OFF) {
			unsigned long flags;

			cf.can_id |= CAN_ERR_RESTARTED;
			netdev_dbg(dev, "restarted\n");
			priv->can.can_stats.restarts++;
			netif_carrier_on(dev);

			spin_lock_irqsave(&priv->lock, flags);

			if (!priv->resetting && !priv->closing) {
				u32 txwr = grcan_read_reg(&regs->txwr);

				if (grcan_txspace(dma->tx.size, txwr,
						  priv->eskbp))
					netif_wake_queue(dev);
			}

			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	/* Data overrun interrupt */
	if ((sources & GRCAN_IRQ_OR) || (status & GRCAN_STAT_OR)) {
		netdev_dbg(dev, "got data overrun interrupt\n");
		stats->rx_over_errors++;
		stats->rx_errors++;

		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* AHB bus error interrupts (not CAN bus errors) - shut down the
	 * device.
	 */
	if (sources & (GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR) ||
	    (status & GRCAN_STAT_AHBERR)) {
		char *txrx = "";
		unsigned long flags;

		if (sources & GRCAN_IRQ_TXAHBERR) {
			txrx = "on tx ";
			stats->tx_errors++;
		} else if (sources & GRCAN_IRQ_RXAHBERR) {
			txrx = "on rx ";
			stats->rx_errors++;
		}
		netdev_err(dev, "Fatal AHB bus error %s- halting device\n",
			   txrx);

		spin_lock_irqsave(&priv->lock, flags);

		/* Prevent anything from being enabled again and halt the device */
		priv->closing = true;
		netif_stop_queue(dev);
		grcan_stop_hardware(dev);
		priv->can.state = CAN_STATE_STOPPED;

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	/* Pass on error frame if something to report,
	 * i.e. id contains some information
	 */
	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(dev, &skb_cf);

		if (skb == NULL) {
			netdev_dbg(dev, "could not allocate error frame\n");
			return;
		}
		skb_cf->can_id |= cf.can_id;
		memcpy(skb_cf->data, cf.data, sizeof(cf.data));

		netif_rx(skb);
	}
}

static irqreturn_t grcan_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	u32 sources, status;

	/* Find out the source */
	sources = grcan_read_reg(&regs->pimsr);
	if (!sources)
		return IRQ_NONE;
	grcan_write_reg(&regs->picr, sources);
	status = grcan_read_reg(&regs->stat);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) /* If we got TX progress, the device has not hung,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * so disable the hang timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (priv->need_txbug_workaround &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) del_timer(&priv->hang_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /* Frame(s) received or transmitted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_RX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /* Disable tx/rx interrupts and schedule poll(). No need for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * locking as interference from a running reset at worst leads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * to an extra interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) grcan_clear_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) napi_schedule(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /* (Potential) error conditions to take care of */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (sources & GRCAN_IRQ_ERRORS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) grcan_err(dev, sources, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /* Reset device and restart operations from where they were.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * This assumes that TXCTRL and RXCTRL are properly disabled and that RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * is not ONGOING (TX might be stuck in ONGOING due to a hardware bug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * in single-shot mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static void grcan_running_reset(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct grcan_priv *priv = from_timer(priv, t, rr_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct net_device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct grcan_registers __iomem *regs = priv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /* This temporarily messes with eskbp, so we need to lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * priv->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) priv->resetting = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) del_timer(&priv->hang_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) del_timer(&priv->rr_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (!priv->closing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* Save and reset - config register preserved by grcan_reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u32 imr = grcan_read_reg(&regs->imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) u32 txaddr = grcan_read_reg(&regs->txaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) u32 txsize = grcan_read_reg(&regs->txsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) u32 txwr = grcan_read_reg(&regs->txwr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) u32 txrd = grcan_read_reg(&regs->txrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) u32 eskbp = priv->eskbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) u32 rxaddr = grcan_read_reg(&regs->rxaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) u32 rxsize = grcan_read_reg(&regs->rxsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) u32 rxwr = grcan_read_reg(&regs->rxwr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) u32 rxrd = grcan_read_reg(&regs->rxrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) grcan_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* Restore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) grcan_write_reg(&regs->txaddr, txaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) grcan_write_reg(&regs->txsize, txsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) grcan_write_reg(&regs->txwr, txwr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) grcan_write_reg(&regs->txrd, txrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) priv->eskbp = eskbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) grcan_write_reg(&regs->rxaddr, rxaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) grcan_write_reg(&regs->rxsize, rxsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) grcan_write_reg(&regs->rxwr, rxwr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) grcan_write_reg(&regs->rxrd, rxrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* Turn on device again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) grcan_write_reg(&regs->imr, imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) priv->can.state = CAN_STATE_ERROR_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) grcan_write_reg(&regs->txctrl, GRCAN_TXCTRL_ENABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) | (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ? GRCAN_TXCTRL_SINGLE : 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* Start the queue if there is space and listen-only mode is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (grcan_txspace(priv->dma.tx.size, txwr, priv->eskbp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) netdev_err(dev, "Device reset and restored\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Waiting time in usecs corresponding to the transmission of three maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * sized CAN frames at the given bitrate (in bits/sec). Waiting for this amount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * of time makes sure that the CAN controller has time to finish sending or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * receiving a frame with a good margin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * usecs/sec * number of frames * bits/frame / bits/sec
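 *
 * For example (illustrative numbers only, the actual worst-case frame length
 * is given by GRCAN_EFF_FRAME_MAX_BITS): with a worst-case extended frame of
 * roughly 160 bits, a bitrate of 1 Mbit/s gives a wait of about 480 usecs and
 * 125 kbit/s gives about 3.8 ms.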
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) static inline u32 grcan_ongoing_wait_usecs(__u32 bitrate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return 1000000 * 3 * GRCAN_EFF_FRAME_MAX_BITS / bitrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* Set the timer so that it will not fire until after a period in which the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * CAN controller has a good margin to finish transmitting a frame, unless it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * has hung
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) u32 wait_jiffies = usecs_to_jiffies(grcan_ongoing_wait_usecs(bitrate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) mod_timer(timer, jiffies + wait_jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* Disable channels and schedule a running reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static void grcan_initiate_running_reset(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct grcan_priv *priv = from_timer(priv, t, hang_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct net_device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct grcan_registers __iomem *regs = priv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) netdev_err(dev, "Device seems hung - reset scheduled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* The main body of this function must never be executed again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * until after an execution of grcan_running_reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (!priv->resetting && !priv->closing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) priv->resetting = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) grcan_reset_timer(&priv->rr_timer, priv->can.bittiming.bitrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static void grcan_free_dma_buffers(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct grcan_dma *dma = &priv->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) dma->base_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) memset(dma, 0, sizeof(*dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static int grcan_allocate_dma_buffers(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) size_t tsize, size_t rsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct grcan_dma *dma = &priv->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct grcan_dma_buffer *large = rsize > tsize ? &dma->rx : &dma->tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct grcan_dma_buffer *small = rsize > tsize ? &dma->tx : &dma->rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) size_t shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* The large buffer, which is placed first, needs to occupy a whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * number of GRCAN_BUFFER_ALIGNMENT units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) size_t maxs = max(tsize, rsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) size_t lsize = ALIGN(maxs, GRCAN_BUFFER_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Put the small buffer after that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) size_t ssize = min(tsize, rsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
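/* Resulting layout within the base allocation (sketch):
 *
 *   base_handle -> [pad up to GRCAN_BUFFER_ALIGNMENT][large buffer: lsize][small buffer: ssize]
 *
 * so the aligned large buffer always fits, regardless of the alignment of
 * the address returned by dma_alloc_coherent.
 */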
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) dma->base_buf = dma_alloc_coherent(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) dma->base_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) &dma->base_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (!dma->base_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) dma->tx.size = tsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) dma->rx.size = rsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) large->handle = ALIGN(dma->base_handle, GRCAN_BUFFER_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) small->handle = large->handle + lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) shift = large->handle - dma->base_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) large->buf = dma->base_buf + shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) small->buf = large->buf + lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* priv->lock *must* be held when calling this function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static int grcan_start(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct grcan_registers __iomem *regs = priv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) u32 confop, txctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) grcan_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) grcan_write_reg(&regs->txaddr, priv->dma.tx.handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) grcan_write_reg(&regs->txsize, priv->dma.tx.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* regs->txwr, regs->txrd and priv->eskbp already set to 0 by reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) grcan_write_reg(&regs->rxaddr, priv->dma.rx.handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) grcan_write_reg(&regs->rxsize, priv->dma.rx.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* regs->rxwr and regs->rxrd already set to 0 by reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /* Enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) grcan_read_reg(&regs->pir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) grcan_write_reg(&regs->imr, GRCAN_IRQ_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /* Enable interfaces, channels and device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) confop = GRCAN_CONF_ABORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) | (priv->config.enable0 ? GRCAN_CONF_ENABLE0 : 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) | (priv->config.enable1 ? GRCAN_CONF_ENABLE1 : 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) | (priv->config.select ? GRCAN_CONF_SELECT : 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) | (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) GRCAN_CONF_SILENT : 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) | (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) GRCAN_CONF_SAM : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) grcan_write_bits(&regs->conf, confop, GRCAN_CONF_OPERATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) txctrl = GRCAN_TXCTRL_ENABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) | (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ? GRCAN_TXCTRL_SINGLE : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) grcan_write_reg(&regs->txctrl, txctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) priv->can.state = CAN_STATE_ERROR_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static int grcan_set_mode(struct net_device *dev, enum can_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (mode == CAN_MODE_START) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* This might be called to restart the device to recover from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * bus off errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (priv->closing || priv->resetting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) netdev_info(dev, "Restarting device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) grcan_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static int grcan_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct grcan_dma *dma = &priv->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /* Allocate memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) err = grcan_allocate_dma_buffers(dev, priv->config.txsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) priv->config.rxsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) netdev_err(dev, "could not allocate DMA buffers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) priv->echo_skb = kcalloc(dma->tx.size, sizeof(*priv->echo_skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!priv->echo_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) goto exit_free_dma_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) priv->can.echo_skb_max = dma->tx.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) priv->can.echo_skb = priv->echo_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) priv->txdlc = kcalloc(dma->tx.size, sizeof(*priv->txdlc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (!priv->txdlc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) goto exit_free_echo_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* Get can device up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) err = open_candev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) goto exit_free_txdlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) goto exit_close_candev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) napi_enable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) grcan_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) priv->resetting = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) priv->closing = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) exit_close_candev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) close_candev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) exit_free_txdlc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) kfree(priv->txdlc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) exit_free_echo_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) kfree(priv->echo_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) exit_free_dma_buffers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) grcan_free_dma_buffers(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static int grcan_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) napi_disable(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) priv->closing = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (priv->need_txbug_workaround) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) del_timer_sync(&priv->hang_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) del_timer_sync(&priv->rr_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) grcan_stop_hardware(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) priv->can.state = CAN_STATE_STOPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) close_candev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) grcan_free_dma_buffers(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) priv->can.echo_skb_max = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) priv->can.echo_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) kfree(priv->echo_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) kfree(priv->txdlc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static int grcan_transmit_catch_up(struct net_device *dev, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) int work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) work_done = catch_up_echo_skb(dev, budget, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (work_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (!priv->resetting && !priv->closing &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /* With napi we don't get TX interrupts for a while,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * so prevent a running reset while catching up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (priv->need_txbug_workaround)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) del_timer(&priv->hang_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static int grcan_receive(struct net_device *dev, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct grcan_registers __iomem *regs = priv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct grcan_dma *dma = &priv->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct net_device_stats *stats = &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct can_frame *cf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) u32 wr, rd, startrd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) u32 *slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) u32 i, rtr, eff, j, shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) rd = grcan_read_reg(&regs->rxrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) startrd = rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) for (work_done = 0; work_done < budget; work_done++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /* Check for packet to receive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) wr = grcan_read_reg(&regs->rxwr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (rd == wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /* Take care of packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) skb = alloc_can_skb(dev, &cf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) netdev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) "dropping frame: skb allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
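/* Each message slot occupies GRCAN_MSG_SIZE bytes: word 0 carries the
 * IDE/RTR flags and the identifier, word 1 the DLC, and the remaining
 * words the data bytes, unpacked below via the GRCAN_MSG_DATA_* helpers.
 */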
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) slot = dma->rx.buf + rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) eff = slot[0] & GRCAN_MSG_IDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) rtr = slot[0] & GRCAN_MSG_RTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (eff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) cf->can_id = ((slot[0] & GRCAN_MSG_EID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) >> GRCAN_MSG_EID_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) cf->can_id |= CAN_EFF_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) cf->can_id = ((slot[0] & GRCAN_MSG_BID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) >> GRCAN_MSG_BID_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) cf->can_dlc = get_can_dlc((slot[1] & GRCAN_MSG_DLC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) >> GRCAN_MSG_DLC_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (rtr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) cf->can_id |= CAN_RTR_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) for (i = 0; i < cf->can_dlc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) j = GRCAN_MSG_DATA_SLOT_INDEX(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) shift = GRCAN_MSG_DATA_SHIFT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) cf->data[i] = (u8)(slot[j] >> shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /* Update statistics and read pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) stats->rx_bytes += cf->can_dlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* Make sure everything is read before allowing hardware to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * use the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /* Update read pointer - no need to check for ongoing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (likely(rd != startrd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) grcan_write_reg(&regs->rxrd, rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static int grcan_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) struct grcan_priv *priv = container_of(napi, struct grcan_priv, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct net_device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct grcan_registers __iomem *regs = priv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) int tx_work_done, rx_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) int rx_budget = budget / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) int tx_budget = budget - rx_budget;
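/* As a concrete example of this split: a budget of 32 yields
 * rx_budget = 16 and tx_budget = 16; an odd budget gives the extra
 * unit to tx_budget.
 */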
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* Half of the budget for receiving messages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) rx_work_done = grcan_receive(dev, rx_budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /* The other half of the budget for handling completed transmissions, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * that results in echo frames being passed up the stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) napi_complete(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /* Guarantee no interference with a running reset that otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * could turn off interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /* Enable tx and rx interrupts again. No need to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * priv->closing as napi_disable in grcan_close is waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * scheduled napi calls to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) grcan_set_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return rx_work_done + tx_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* Work around the tx bug by waiting a while for the risky situation to clear. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * that fails, drop the frame in one-shot mode or indicate a busy device otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * Returns 0 on successful wait. Otherwise it sets *netdev_tx_status to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * value that should be returned by grcan_start_xmit when aborting the xmit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static int grcan_txbug_workaround(struct net_device *dev, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) u32 txwr, u32 oneshotmode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) netdev_tx_t *netdev_tx_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct grcan_registers __iomem *regs = priv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct grcan_dma *dma = &priv->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /* Wait a while for ongoing to be cleared or read pointer to catch up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * write pointer. The latter is needed due to a bug in older versions of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * GRCAN in which ONGOING is not cleared properly in one-shot mode when a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * transmission fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) for (i = 0; i < GRCAN_SHORTWAIT_USECS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ONGOING) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) grcan_read_reg(&regs->txrd) == txwr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /* Clean up, in case the situation was not resolved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (!priv->resetting && !priv->closing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* Queue might have been stopped earlier in grcan_start_xmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (grcan_txspace(dma->tx.size, txwr, priv->eskbp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* Set a timer to resolve a hung tx controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (!timer_pending(&priv->hang_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) grcan_reset_timer(&priv->hang_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) priv->can.bittiming.bitrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (oneshotmode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) /* In one-shot mode we should never end up here because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * then the interrupt handler increases txrd on TXLOSS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * but it is consistent with one-shot mode to drop the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * frame in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) *netdev_tx_status = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* In normal mode the socket-can transmission queue gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * to keep the frame so that it can be retransmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) *netdev_tx_status = NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* Notes on the tx cyclic buffer handling:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * regs->txwr - the next slot for the driver to put data to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * regs->txrd - the next slot for the device to read data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * priv->eskbp - the next slot for the driver to call can_put_echo_skb for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * grcan_start_xmit can enter more messages as long as regs->txwr does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * not reach priv->eskbp (within 1 message gap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * The device sends messages until regs->txrd reaches regs->txwr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * The interrupt handler calls can_put_echo_skb until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * priv->eskbp reaches regs->txrd
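 *
 * In ring order the three pointers therefore always satisfy (illustrative
 * sketch):
 *
 *   eskbp <= txrd <= txwr
 *   [eskbp, txrd): sent by the device, waiting to be echoed back
 *   [txrd, txwr):  queued, waiting to be sent by the device
 *   [txwr, eskbp): free for new messages (minus the one-message gap)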
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct grcan_registers __iomem *regs = priv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) struct grcan_dma *dma = &priv->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct can_frame *cf = (struct can_frame *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) u32 id, txwr, txrd, space, txctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) int slotindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) u32 *slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) u32 i, rtr, eff, dlc, tmp, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) int j, shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (can_dropped_invalid_skb(dev, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) /* Trying to transmit in silent mode will generate error interrupts, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * this should never happen - the queue should not have been started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /* Reads of priv->eskbp and shut-downs of the queue need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * be atomic with respect to the updates to priv->eskbp and wake-ups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * of the queue in the interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) spin_lock_irqsave(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) txwr = grcan_read_reg(&regs->txwr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) space = grcan_txspace(dma->tx.size, txwr, priv->eskbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) slotindex = txwr / GRCAN_MSG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) slot = dma->tx.buf + txwr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (unlikely(space == 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) spin_unlock_irqrestore(&priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /* End of critical section */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* This should never happen. If the circular buffer is full, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * queue should already have been stopped by netif_stop_queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (unlikely(!space)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) netdev_err(dev, "No buffer space, but queue is non-stopped.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) /* Convert and write CAN message to DMA buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) eff = cf->can_id & CAN_EFF_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) rtr = cf->can_id & CAN_RTR_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) id = cf->can_id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) dlc = cf->can_dlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (eff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) tmp = (id << GRCAN_MSG_EID_BIT) & GRCAN_MSG_EID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) tmp = (id << GRCAN_MSG_BID_BIT) & GRCAN_MSG_BID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) slot[0] = (eff ? GRCAN_MSG_IDE : 0) | (rtr ? GRCAN_MSG_RTR : 0) | tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) slot[1] = ((dlc << GRCAN_MSG_DLC_BIT) & GRCAN_MSG_DLC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) slot[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) slot[3] = 0;
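/* Pack the payload into the data words of the slot; the word index and
 * bit position for each byte are given by the GRCAN_MSG_DATA_SLOT_INDEX
 * and GRCAN_MSG_DATA_SHIFT helpers (mirroring the rx unpacking in
 * grcan_receive).
 */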
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) for (i = 0; i < dlc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) j = GRCAN_MSG_DATA_SLOT_INDEX(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) shift = GRCAN_MSG_DATA_SHIFT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) slot[j] |= cf->data[i] << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /* Check that the channel has not been disabled. These cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * should never happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) txctrl = grcan_read_reg(&regs->txctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (!(txctrl & GRCAN_TXCTRL_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) netdev_err(dev, "tx channel spuriously disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (oneshotmode && !(txctrl & GRCAN_TXCTRL_SINGLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) netdev_err(dev, "one-shot mode spuriously disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /* Bug workaround for old versions of GRCAN in which updating txwr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * in the same clock cycle as the controller updates txrd to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * current txwr could hang the CAN controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (priv->need_txbug_workaround) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) txrd = grcan_read_reg(&regs->txrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (unlikely(grcan_ring_sub(txwr, txrd, dma->tx.size) == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) netdev_tx_t txstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) err = grcan_txbug_workaround(dev, skb, txwr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) oneshotmode, &txstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return txstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /* Prepare skb for echoing. This must be after the bug workaround above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * as ownership of the skb is passed on by calling can_put_echo_skb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * Returning NETDEV_TX_BUSY or accessing skb or cf after a call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * can_put_echo_skb would be an error unless other measures are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * taken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) priv->txdlc[slotindex] = cf->can_dlc; /* Store dlc for statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) can_put_echo_skb(skb, dev, slotindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /* Make sure everything is written before allowing hardware to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * read from the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /* Update write pointer to start transmission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) grcan_write_reg(&regs->txwr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) grcan_ring_add(txwr, GRCAN_MSG_SIZE, dma->tx.size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /* ========== Setting up sysfs interface and module parameters ========== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) #define GRCAN_NOT_BOOL(unsigned_val) ((unsigned_val) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) #define GRCAN_MODULE_PARAM(name, mtype, valcheckf, desc) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) static void grcan_sanitize_##name(struct platform_device *pd) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct grcan_device_config grcan_default_config \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) = GRCAN_DEFAULT_DEVICE_CONFIG; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (valcheckf(grcan_module_config.name)) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) dev_err(&pd->dev, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) "Invalid module parameter value for " \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) #name " - setting default\n"); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) grcan_module_config.name = \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) grcan_default_config.name; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) module_param_named(name, grcan_module_config.name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) mtype, 0444); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) MODULE_PARM_DESC(name, desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) #define GRCAN_CONFIG_ATTR(name, desc) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static ssize_t grcan_store_##name(struct device *sdev, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct device_attribute *att, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) const char *buf, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) size_t count) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct net_device *dev = to_net_dev(sdev); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct grcan_priv *priv = netdev_priv(dev); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) u8 val; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) int ret; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (dev->flags & IFF_UP) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) return -EBUSY; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ret = kstrtou8(buf, 0, &val); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (ret < 0 || val > 1) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return -EINVAL; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) priv->config.name = val; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return count; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static ssize_t grcan_show_##name(struct device *sdev, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) struct device_attribute *att, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) char *buf) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) struct net_device *dev = to_net_dev(sdev); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct grcan_priv *priv = netdev_priv(dev); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return sprintf(buf, "%d\n", priv->config.name); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static DEVICE_ATTR(name, 0644, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) grcan_show_##name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) grcan_store_##name); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) GRCAN_MODULE_PARAM(name, ushort, GRCAN_NOT_BOOL, desc)
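/* For example, GRCAN_CONFIG_ATTR(enable0, ...) below defines
 * grcan_show_enable0()/grcan_store_enable0(), the dev_attr_enable0 device
 * attribute used in sysfs_grcan_attrs, and (via GRCAN_MODULE_PARAM) an
 * enable0 module parameter carrying the same description.
 */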
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /* The following configuration options are made available both via module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * parameters and writable sysfs files. See the chapter about GRCAN in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * documentation for the GRLIB VHDL library for further details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) GRCAN_CONFIG_ATTR(enable0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) "Configuration of physical interface 0. Determines\n" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) "the \"Enable 0\" bit of the configuration register.\n" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) "Format: 0 | 1\nDefault: 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) GRCAN_CONFIG_ATTR(enable1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) "Configuration of physical interface 1. Determines\n" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) "the \"Enable 1\" bit of the configuration register.\n" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) "Format: 0 | 1\nDefault: 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) GRCAN_CONFIG_ATTR(select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) "Select which physical interface to use.\n" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) "Format: 0 | 1\nDefault: 0\n");
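/* These attributes show up in the "grcan" sysfs group of the network device,
 * so - assuming an interface named can0 - selecting physical interface 1
 * could be done with something like:
 *
 *   echo 1 > /sys/class/net/can0/grcan/select
 *
 * Writes are rejected with -EBUSY while the interface is up.
 */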
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /* The tx and rx buffer size configuration options are only available via module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) GRCAN_MODULE_PARAM(txsize, uint, GRCAN_INVALID_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) "Sets the size of the tx buffer.\n" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) "Format: <unsigned int> where (txsize & ~0x1fffc0) == 0\n" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) "Default: 1024\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) GRCAN_MODULE_PARAM(rxsize, uint, GRCAN_INVALID_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) "Sets the size of the rx buffer.\n" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) "Format: <unsigned int> where (size & ~0x1fffc0) == 0\n" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) "Default: 1024\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /* Function that makes sure that configuration done using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * module parameters are set to valid values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static void grcan_sanitize_module_config(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) grcan_sanitize_enable0(ofdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) grcan_sanitize_enable1(ofdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) grcan_sanitize_select(ofdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) grcan_sanitize_txsize(ofdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) grcan_sanitize_rxsize(ofdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static const struct attribute *const sysfs_grcan_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /* Config attrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) &dev_attr_enable0.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) &dev_attr_enable1.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) &dev_attr_select.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static const struct attribute_group sysfs_grcan_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) .name = "grcan",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) .attrs = (struct attribute **)sysfs_grcan_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /* ========== Setting up the driver ========== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) static const struct net_device_ops grcan_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) .ndo_open = grcan_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) .ndo_stop = grcan_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) .ndo_start_xmit = grcan_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) .ndo_change_mtu = can_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) static int grcan_setup_netdev(struct platform_device *ofdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) int irq, u32 ambafreq, bool txbug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct grcan_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) struct grcan_registers __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) dev = alloc_candev(sizeof(struct grcan_priv), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) dev->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) dev->flags |= IFF_ECHO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) dev->netdev_ops = &grcan_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) dev->sysfs_groups[0] = &sysfs_grcan_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) memcpy(&priv->config, &grcan_module_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) sizeof(struct grcan_device_config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) priv->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) priv->regs = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) priv->can.bittiming_const = &grcan_bittiming_const;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) priv->can.do_set_bittiming = grcan_set_bittiming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) priv->can.do_set_mode = grcan_set_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) priv->can.do_get_berr_counter = grcan_get_berr_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) priv->can.clock.freq = ambafreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) priv->can.ctrlmode_supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) priv->need_txbug_workaround = txbug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) /* Discover if triple sampling is supported by hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) regs = priv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) grcan_set_bits(®s->ctrl, GRCAN_CTRL_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) grcan_set_bits(®s->conf, GRCAN_CONF_SAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (grcan_read_bits(®s->conf, GRCAN_CONF_SAM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) dev_dbg(&ofdev->dev, "Hardware supports triple-sampling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) spin_lock_init(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (priv->need_txbug_workaround) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) timer_setup(&priv->rr_timer, grcan_running_reset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) timer_setup(&priv->hang_timer, grcan_initiate_running_reset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) SET_NETDEV_DEV(dev, &ofdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) priv->regs, dev->irq, priv->can.clock.freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) err = register_candev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) goto exit_free_candev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) platform_set_drvdata(ofdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /* Reset device to allow bit-timing to be set. No need to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * grcan_reset at this stage. That is done in grcan_open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) grcan_write_reg(®s->ctrl, GRCAN_CTRL_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) exit_free_candev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) free_candev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) static int grcan_probe(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) struct device_node *np = ofdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) u32 sysid, ambafreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) int irq, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) bool txbug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /* Compare GRLIB version number with the first that does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * have the tx bug (see start_xmit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) err = of_property_read_u32(np, "systemid", &sysid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (!err && ((sysid & GRLIB_VERSION_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) txbug = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) err = of_property_read_u32(np, "freq", &ambafreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) dev_err(&ofdev->dev, "unable to fetch \"freq\" property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) goto exit_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) base = devm_platform_ioremap_resource(ofdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (IS_ERR(base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) err = PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) goto exit_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) irq = irq_of_parse_and_map(np, GRCAN_IRQIX_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if (!irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) dev_err(&ofdev->dev, "no irq found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) goto exit_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) grcan_sanitize_module_config(ofdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) err = grcan_setup_netdev(ofdev, base, irq, ambafreq, txbug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) goto exit_dispose_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) exit_dispose_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) irq_dispose_mapping(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) exit_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) dev_err(&ofdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) "%s socket CAN driver initialization failed with error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) DRV_NAME, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static int grcan_remove(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct net_device *dev = platform_get_drvdata(ofdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct grcan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) unregister_candev(dev); /* Will in turn call grcan_close */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) irq_dispose_mapping(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) netif_napi_del(&priv->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) free_candev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) static const struct of_device_id grcan_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) {.name = "GAISLER_GRCAN"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {.name = "01_03d"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) {.name = "GAISLER_GRHCAN"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) {.name = "01_034"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) MODULE_DEVICE_TABLE(of, grcan_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) static struct platform_driver grcan_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) .of_match_table = grcan_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) .probe = grcan_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) .remove = grcan_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) module_platform_driver(grcan_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) MODULE_AUTHOR("Aeroflex Gaisler AB.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) MODULE_DESCRIPTION("Socket CAN driver for Aeroflex Gaisler GRCAN");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) MODULE_LICENSE("GPL");