Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

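The excerpt below is from the tree's Freescale LPUART serial driver (drivers/tty/serial/fsl_lpuart.c), covering the register definitions, the console-poll hooks, and the transmit paths (FIFO PIO and DMA).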
// SPDX-License-Identifier: GPL-2.0+
/*
 *  Freescale lpuart serial port driver
 *
 *  Copyright 2012-2014 Freescale Semiconductor, Inc.
 */

#include <linux/clk.h>
#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>

/* All registers are 8-bit width */
#define UARTBDH			0x00
#define UARTBDL			0x01
#define UARTCR1			0x02
#define UARTCR2			0x03
#define UARTSR1			0x04
#define UARTCR3			0x06
#define UARTDR			0x07
#define UARTCR4			0x0a
#define UARTCR5			0x0b
#define UARTMODEM		0x0d
#define UARTPFIFO		0x10
#define UARTCFIFO		0x11
#define UARTSFIFO		0x12
#define UARTTWFIFO		0x13
#define UARTTCFIFO		0x14
#define UARTRWFIFO		0x15

#define UARTBDH_LBKDIE		0x80
#define UARTBDH_RXEDGIE		0x40
#define UARTBDH_SBR_MASK	0x1f

#define UARTCR1_LOOPS		0x80
#define UARTCR1_RSRC		0x20
#define UARTCR1_M		0x10
#define UARTCR1_WAKE		0x08
#define UARTCR1_ILT		0x04
#define UARTCR1_PE		0x02
#define UARTCR1_PT		0x01

#define UARTCR2_TIE		0x80
#define UARTCR2_TCIE		0x40
#define UARTCR2_RIE		0x20
#define UARTCR2_ILIE		0x10
#define UARTCR2_TE		0x08
#define UARTCR2_RE		0x04
#define UARTCR2_RWU		0x02
#define UARTCR2_SBK		0x01

#define UARTSR1_TDRE		0x80
#define UARTSR1_TC		0x40
#define UARTSR1_RDRF		0x20
#define UARTSR1_IDLE		0x10
#define UARTSR1_OR		0x08
#define UARTSR1_NF		0x04
#define UARTSR1_FE		0x02
#define UARTSR1_PE		0x01

#define UARTCR3_R8		0x80
#define UARTCR3_T8		0x40
#define UARTCR3_TXDIR		0x20
#define UARTCR3_TXINV		0x10
#define UARTCR3_ORIE		0x08
#define UARTCR3_NEIE		0x04
#define UARTCR3_FEIE		0x02
#define UARTCR3_PEIE		0x01

#define UARTCR4_MAEN1		0x80
#define UARTCR4_MAEN2		0x40
#define UARTCR4_M10		0x20
#define UARTCR4_BRFA_MASK	0x1f
#define UARTCR4_BRFA_OFF	0

#define UARTCR5_TDMAS		0x80
#define UARTCR5_RDMAS		0x20

#define UARTMODEM_RXRTSE	0x08
#define UARTMODEM_TXRTSPOL	0x04
#define UARTMODEM_TXRTSE	0x02
#define UARTMODEM_TXCTSE	0x01

#define UARTPFIFO_TXFE		0x80
#define UARTPFIFO_FIFOSIZE_MASK	0x7
#define UARTPFIFO_TXSIZE_OFF	4
#define UARTPFIFO_RXFE		0x08
#define UARTPFIFO_RXSIZE_OFF	0

#define UARTCFIFO_TXFLUSH	0x80
#define UARTCFIFO_RXFLUSH	0x40
#define UARTCFIFO_RXOFE		0x04
#define UARTCFIFO_TXOFE		0x02
#define UARTCFIFO_RXUFE		0x01

#define UARTSFIFO_TXEMPT	0x80
#define UARTSFIFO_RXEMPT	0x40
#define UARTSFIFO_RXOF		0x04
#define UARTSFIFO_TXOF		0x02
#define UARTSFIFO_RXUF		0x01

/* 32-bit register definition */
#define UARTBAUD		0x00
#define UARTSTAT		0x04
#define UARTCTRL		0x08
#define UARTDATA		0x0C
#define UARTMATCH		0x10
#define UARTMODIR		0x14
#define UARTFIFO		0x18
#define UARTWATER		0x1c

#define UARTBAUD_MAEN1		0x80000000
#define UARTBAUD_MAEN2		0x40000000
#define UARTBAUD_M10		0x20000000
#define UARTBAUD_TDMAE		0x00800000
#define UARTBAUD_RDMAE		0x00200000
#define UARTBAUD_MATCFG		0x00400000
#define UARTBAUD_BOTHEDGE	0x00020000
#define UARTBAUD_RESYNCDIS	0x00010000
#define UARTBAUD_LBKDIE		0x00008000
#define UARTBAUD_RXEDGIE	0x00004000
#define UARTBAUD_SBNS		0x00002000
#define UARTBAUD_SBR		0x00000000
#define UARTBAUD_SBR_MASK	0x1fff
#define UARTBAUD_OSR_MASK	0x1f
#define UARTBAUD_OSR_SHIFT	24

#define UARTSTAT_LBKDIF		0x80000000
#define UARTSTAT_RXEDGIF	0x40000000
#define UARTSTAT_MSBF		0x20000000
#define UARTSTAT_RXINV		0x10000000
#define UARTSTAT_RWUID		0x08000000
#define UARTSTAT_BRK13		0x04000000
#define UARTSTAT_LBKDE		0x02000000
#define UARTSTAT_RAF		0x01000000
#define UARTSTAT_TDRE		0x00800000
#define UARTSTAT_TC		0x00400000
#define UARTSTAT_RDRF		0x00200000
#define UARTSTAT_IDLE		0x00100000
#define UARTSTAT_OR		0x00080000
#define UARTSTAT_NF		0x00040000
#define UARTSTAT_FE		0x00020000
#define UARTSTAT_PE		0x00010000
#define UARTSTAT_MA1F		0x00008000
#define UARTSTAT_M21F		0x00004000

#define UARTCTRL_R8T9		0x80000000
#define UARTCTRL_R9T8		0x40000000
#define UARTCTRL_TXDIR		0x20000000
#define UARTCTRL_TXINV		0x10000000
#define UARTCTRL_ORIE		0x08000000
#define UARTCTRL_NEIE		0x04000000
#define UARTCTRL_FEIE		0x02000000
#define UARTCTRL_PEIE		0x01000000
#define UARTCTRL_TIE		0x00800000
#define UARTCTRL_TCIE		0x00400000
#define UARTCTRL_RIE		0x00200000
#define UARTCTRL_ILIE		0x00100000
#define UARTCTRL_TE		0x00080000
#define UARTCTRL_RE		0x00040000
#define UARTCTRL_RWU		0x00020000
#define UARTCTRL_SBK		0x00010000
#define UARTCTRL_MA1IE		0x00008000
#define UARTCTRL_MA2IE		0x00004000
#define UARTCTRL_IDLECFG	0x00000100
#define UARTCTRL_LOOPS		0x00000080
#define UARTCTRL_DOZEEN		0x00000040
#define UARTCTRL_RSRC		0x00000020
#define UARTCTRL_M		0x00000010
#define UARTCTRL_WAKE		0x00000008
#define UARTCTRL_ILT		0x00000004
#define UARTCTRL_PE		0x00000002
#define UARTCTRL_PT		0x00000001

#define UARTDATA_NOISY		0x00008000
#define UARTDATA_PARITYE	0x00004000
#define UARTDATA_FRETSC		0x00002000
#define UARTDATA_RXEMPT		0x00001000
#define UARTDATA_IDLINE		0x00000800
#define UARTDATA_MASK		0x3ff

#define UARTMODIR_IREN		0x00020000
#define UARTMODIR_TXCTSSRC	0x00000020
#define UARTMODIR_TXCTSC	0x00000010
#define UARTMODIR_RXRTSE	0x00000008
#define UARTMODIR_TXRTSPOL	0x00000004
#define UARTMODIR_TXRTSE	0x00000002
#define UARTMODIR_TXCTSE	0x00000001

#define UARTFIFO_TXEMPT		0x00800000
#define UARTFIFO_RXEMPT		0x00400000
#define UARTFIFO_TXOF		0x00020000
#define UARTFIFO_RXUF		0x00010000
#define UARTFIFO_TXFLUSH	0x00008000
#define UARTFIFO_RXFLUSH	0x00004000
#define UARTFIFO_TXOFE		0x00000200
#define UARTFIFO_RXUFE		0x00000100
#define UARTFIFO_TXFE		0x00000080
#define UARTFIFO_FIFOSIZE_MASK	0x7
#define UARTFIFO_TXSIZE_OFF	4
#define UARTFIFO_RXFE		0x00000008
#define UARTFIFO_RXSIZE_OFF	0
#define UARTFIFO_DEPTH(x)	(0x1 << ((x) ? ((x) + 1) : 0))

#define UARTWATER_COUNT_MASK	0xff
#define UARTWATER_TXCNT_OFF	8
#define UARTWATER_RXCNT_OFF	24
#define UARTWATER_WATER_MASK	0xff
#define UARTWATER_TXWATER_OFF	0
#define UARTWATER_RXWATER_OFF	16

/* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */
#define DMA_RX_TIMEOUT		(10)

#define DRIVER_NAME	"fsl-lpuart"
#define DEV_NAME	"ttyLP"
#define UART_NR		6

/* IMX lpuart has four extra unused regs located at the beginning */
#define IMX_REG_OFF	0x10

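/* IDA for allocating port line numbers when the DT does not supply one */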
static DEFINE_IDA(fsl_lpuart_ida);

enum lpuart_type {
	VF610_LPUART,
	LS1021A_LPUART,
	LS1028A_LPUART,
	IMX7ULP_LPUART,
	IMX8QXP_LPUART,
};

struct lpuart_port {
	struct uart_port	port;
	enum lpuart_type	devtype;
	struct clk		*ipg_clk;
	struct clk		*baud_clk;
	unsigned int		txfifo_size;
	unsigned int		rxfifo_size;

	bool			lpuart_dma_tx_use;
	bool			lpuart_dma_rx_use;
	struct dma_chan		*dma_tx_chan;
	struct dma_chan		*dma_rx_chan;
	struct dma_async_tx_descriptor  *dma_tx_desc;
	struct dma_async_tx_descriptor  *dma_rx_desc;
	dma_cookie_t		dma_tx_cookie;
	dma_cookie_t		dma_rx_cookie;
	unsigned int		dma_tx_bytes;
	unsigned int		dma_rx_bytes;
	bool			dma_tx_in_progress;
	unsigned int		dma_rx_timeout;
	struct timer_list	lpuart_timer;
	struct scatterlist	rx_sgl, tx_sgl[2];
	struct circ_buf		rx_ring;
	int			rx_dma_rng_buf_len;
	unsigned int		dma_tx_nents;
	wait_queue_head_t	dma_wait;
	bool			id_allocated;
};

struct lpuart_soc_data {
	enum lpuart_type devtype;
	char iotype;
	u8 reg_off;
};

static const struct lpuart_soc_data vf_data = {
	.devtype = VF610_LPUART,
	.iotype = UPIO_MEM,
};

static const struct lpuart_soc_data ls1021a_data = {
	.devtype = LS1021A_LPUART,
	.iotype = UPIO_MEM32BE,
};

static const struct lpuart_soc_data ls1028a_data = {
	.devtype = LS1028A_LPUART,
	.iotype = UPIO_MEM32,
};

static struct lpuart_soc_data imx7ulp_data = {
	.devtype = IMX7ULP_LPUART,
	.iotype = UPIO_MEM32,
	.reg_off = IMX_REG_OFF,
};

static struct lpuart_soc_data imx8qxp_data = {
	.devtype = IMX8QXP_LPUART,
	.iotype = UPIO_MEM32,
	.reg_off = IMX_REG_OFF,
};

static const struct of_device_id lpuart_dt_ids[] = {
	{ .compatible = "fsl,vf610-lpuart",	.data = &vf_data, },
	{ .compatible = "fsl,ls1021a-lpuart",	.data = &ls1021a_data, },
	{ .compatible = "fsl,ls1028a-lpuart",	.data = &ls1028a_data, },
	{ .compatible = "fsl,imx7ulp-lpuart",	.data = &imx7ulp_data, },
	{ .compatible = "fsl,imx8qxp-lpuart",	.data = &imx8qxp_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, lpuart_dt_ids);

/* Forward declare this for the dma callbacks */
static void lpuart_dma_tx_complete(void *arg);

static inline bool is_layerscape_lpuart(struct lpuart_port *sport)
{
	return (sport->devtype == LS1021A_LPUART ||
		sport->devtype == LS1028A_LPUART);
}

static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
{
	return sport->devtype == IMX8QXP_LPUART;
}

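/*
 * Register accessors for the 32-bit LPUART variants: the registers are
 * mapped little-endian (UPIO_MEM32) or big-endian (UPIO_MEM32BE) depending
 * on the SoC, so dispatch on the port's iotype.
 */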
static inline u32 lpuart32_read(struct uart_port *port, u32 off)
{
	switch (port->iotype) {
	case UPIO_MEM32:
		return readl(port->membase + off);
	case UPIO_MEM32BE:
		return ioread32be(port->membase + off);
	default:
		return 0;
	}
}

static inline void lpuart32_write(struct uart_port *port, u32 val,
				  u32 off)
{
	switch (port->iotype) {
	case UPIO_MEM32:
		writel(val, port->membase + off);
		break;
	case UPIO_MEM32BE:
		iowrite32be(val, port->membase + off);
		break;
	}
}

static int __lpuart_enable_clks(struct lpuart_port *sport, bool is_en)
{
	int ret = 0;

	if (is_en) {
		ret = clk_prepare_enable(sport->ipg_clk);
		if (ret)
			return ret;

		ret = clk_prepare_enable(sport->baud_clk);
		if (ret) {
			clk_disable_unprepare(sport->ipg_clk);
			return ret;
		}
	} else {
		clk_disable_unprepare(sport->baud_clk);
		clk_disable_unprepare(sport->ipg_clk);
	}

	return 0;
}

static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport)
{
	if (is_imx8qxp_lpuart(sport))
		return clk_get_rate(sport->baud_clk);

	return clk_get_rate(sport->ipg_clk);
}

#define lpuart_enable_clks(x)	__lpuart_enable_clks(x, true)
#define lpuart_disable_clks(x)	__lpuart_enable_clks(x, false)

static void lpuart_stop_tx(struct uart_port *port)
{
	unsigned char temp;

	temp = readb(port->membase + UARTCR2);
	temp &= ~(UARTCR2_TIE | UARTCR2_TCIE);
	writeb(temp, port->membase + UARTCR2);
}

static void lpuart32_stop_tx(struct uart_port *port)
{
	unsigned long temp;

	temp = lpuart32_read(port, UARTCTRL);
	temp &= ~(UARTCTRL_TIE | UARTCTRL_TCIE);
	lpuart32_write(port, temp, UARTCTRL);
}

static void lpuart_stop_rx(struct uart_port *port)
{
	unsigned char temp;

	temp = readb(port->membase + UARTCR2);
	writeb(temp & ~UARTCR2_RE, port->membase + UARTCR2);
}

static void lpuart32_stop_rx(struct uart_port *port)
{
	unsigned long temp;

	temp = lpuart32_read(port, UARTCTRL);
	lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL);
}

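/*
 * Kick off a TX DMA transfer: map the pending data in the circular xmit
 * buffer as one scatterlist entry, or two entries when the data wraps
 * around the end of the buffer, then submit it to the DMA engine.
 */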
static void lpuart_dma_tx(struct lpuart_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;
	struct scatterlist *sgl = sport->tx_sgl;
	struct device *dev = sport->port.dev;
	struct dma_chan *chan = sport->dma_tx_chan;
	int ret;

	if (sport->dma_tx_in_progress)
		return;

	sport->dma_tx_bytes = uart_circ_chars_pending(xmit);

	if (xmit->tail < xmit->head || xmit->head == 0) {
		sport->dma_tx_nents = 1;
		sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
	} else {
		sport->dma_tx_nents = 2;
		sg_init_table(sgl, 2);
		sg_set_buf(sgl, xmit->buf + xmit->tail,
				UART_XMIT_SIZE - xmit->tail);
		sg_set_buf(sgl + 1, xmit->buf, xmit->head);
	}

	ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents,
			 DMA_TO_DEVICE);
	if (!ret) {
		dev_err(dev, "DMA mapping error for TX.\n");
		return;
	}

	sport->dma_tx_desc = dmaengine_prep_slave_sg(chan, sgl,
					ret, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT);
	if (!sport->dma_tx_desc) {
		dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
			      DMA_TO_DEVICE);
		dev_err(dev, "Cannot prepare TX slave DMA!\n");
		return;
	}

	sport->dma_tx_desc->callback = lpuart_dma_tx_complete;
	sport->dma_tx_desc->callback_param = sport;
	sport->dma_tx_in_progress = true;
	sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
	dma_async_issue_pending(chan);
}

static bool lpuart_stopped_or_empty(struct uart_port *port)
{
	return uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port);
}

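/*
 * TX DMA completion callback: unmap the scatterlist, advance the circular
 * buffer tail by the number of bytes sent, wake up writers, and either
 * signal a waiter on dma_wait or chain the next transfer if more data is
 * pending.
 */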
static void lpuart_dma_tx_complete(void *arg)
{
	struct lpuart_port *sport = arg;
	struct scatterlist *sgl = &sport->tx_sgl[0];
	struct circ_buf *xmit = &sport->port.state->xmit;
	struct dma_chan *chan = sport->dma_tx_chan;
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);

	dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
		     DMA_TO_DEVICE);

	xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1);

	sport->port.icount.tx += sport->dma_tx_bytes;
	sport->dma_tx_in_progress = false;
	spin_unlock_irqrestore(&sport->port.lock, flags);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (waitqueue_active(&sport->dma_wait)) {
		wake_up(&sport->dma_wait);
		return;
	}

	spin_lock_irqsave(&sport->port.lock, flags);

	if (!lpuart_stopped_or_empty(&sport->port))
		lpuart_dma_tx(sport);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}

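/*
 * Bus address of the data register for DMA. The DMA engine performs
 * byte-wide accesses, so on the big-endian mapping the least significant
 * byte of the 32-bit UARTDATA register sits at the highest byte address.
 */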
static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
{
	switch (sport->port.iotype) {
	case UPIO_MEM32:
		return sport->port.mapbase + UARTDATA;
	case UPIO_MEM32BE:
		return sport->port.mapbase + UARTDATA + sizeof(u32) - 1;
	}
	return sport->port.mapbase + UARTDR;
}

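/* Configure the TX DMA channel for single-byte writes to the data register */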
static int lpuart_dma_tx_request(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port,
					struct lpuart_port, port);
	struct dma_slave_config dma_tx_sconfig = {};
	int ret;

	dma_tx_sconfig.dst_addr = lpuart_dma_datareg_addr(sport);
	dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma_tx_sconfig.dst_maxburst = 1;
	dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);

	if (ret) {
		dev_err(sport->port.dev,
				"DMA slave config failed, err = %d\n", ret);
		return ret;
	}

	return 0;
}

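/* True for the variants with 32-bit registers (UPIO_MEM32/UPIO_MEM32BE) */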
static bool lpuart_is_32(struct lpuart_port *sport)
{
	return sport->port.iotype == UPIO_MEM32 ||
	       sport->port.iotype == UPIO_MEM32BE;
}

static void lpuart_flush_buffer(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
	struct dma_chan *chan = sport->dma_tx_chan;
	u32 val;

	if (sport->lpuart_dma_tx_use) {
		if (sport->dma_tx_in_progress) {
			dma_unmap_sg(chan->device->dev, &sport->tx_sgl[0],
				sport->dma_tx_nents, DMA_TO_DEVICE);
			sport->dma_tx_in_progress = false;
		}
		dmaengine_terminate_all(chan);
	}

	if (lpuart_is_32(sport)) {
		val = lpuart32_read(&sport->port, UARTFIFO);
		val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
		lpuart32_write(&sport->port, val, UARTFIFO);
	} else {
		val = readb(sport->port.membase + UARTCFIFO);
		val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
		writeb(val, sport->port.membase + UARTCFIFO);
	}
}

static void lpuart_wait_bit_set(struct uart_port *port, unsigned int offset,
				u8 bit)
{
	while (!(readb(port->membase + offset) & bit))
		cpu_relax();
}

static void lpuart32_wait_bit_set(struct uart_port *port, unsigned int offset,
				  u32 bit)
{
	while (!(lpuart32_read(port, offset) & bit))
		cpu_relax();
}

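/* Polled-mode console hooks, used e.g. by kgdb over the serial console */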
#if defined(CONFIG_CONSOLE_POLL)

static int lpuart_poll_init(struct uart_port *port)
{
	struct lpuart_port *sport = container_of(port,
					struct lpuart_port, port);
	unsigned long flags;
	unsigned char temp;

	sport->port.fifosize = 0;

	spin_lock_irqsave(&sport->port.lock, flags);
	/* Disable Rx & Tx */
	writeb(0, sport->port.membase + UARTCR2);

	temp = readb(sport->port.membase + UARTPFIFO);
	/* Enable Rx and Tx FIFO */
	writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
			sport->port.membase + UARTPFIFO);

	/* flush Tx and Rx FIFO */
	writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
			sport->port.membase + UARTCFIFO);

	/* explicitly clear RDRF */
	if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
		readb(sport->port.membase + UARTDR);
		writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
	}

	writeb(0, sport->port.membase + UARTTWFIFO);
	writeb(1, sport->port.membase + UARTRWFIFO);

	/* Enable Rx and Tx */
	writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	return 0;
}

static void lpuart_poll_put_char(struct uart_port *port, unsigned char c)
{
	/* drain */
	lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TDRE);
	writeb(c, port->membase + UARTDR);
}

static int lpuart_poll_get_char(struct uart_port *port)
{
	if (!(readb(port->membase + UARTSR1) & UARTSR1_RDRF))
		return NO_POLL_CHAR;

	return readb(port->membase + UARTDR);
}

static int lpuart32_poll_init(struct uart_port *port)
{
	unsigned long flags;
	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
	u32 temp;

	sport->port.fifosize = 0;

	spin_lock_irqsave(&sport->port.lock, flags);

	/* Disable Rx & Tx */
	lpuart32_write(&sport->port, 0, UARTCTRL);

	temp = lpuart32_read(&sport->port, UARTFIFO);

	/* Enable Rx and Tx FIFO */
	lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);

	/* flush Tx and Rx FIFO */
	lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);

	/* explicitly clear RDRF */
	if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
		lpuart32_read(&sport->port, UARTDATA);
		lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
	}

	/* Enable Rx and Tx */
	lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	return 0;
}

static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
{
	lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
	lpuart32_write(port, c, UARTDATA);
}

static int lpuart32_poll_get_char(struct uart_port *port)
{
	if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
		return NO_POLL_CHAR;

	return lpuart32_read(port, UARTDATA);
}
#endif

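/*
 * PIO transmit path: push any pending x_char (e.g. XON/XOFF) first, then
 * fill the TX FIFO from the circular buffer until the FIFO is full or the
 * buffer is empty.
 */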
static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;

	if (sport->port.x_char) {
		writeb(sport->port.x_char, sport->port.membase + UARTDR);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		return;
	}

	if (lpuart_stopped_or_empty(&sport->port)) {
		lpuart_stop_tx(&sport->port);
		return;
	}

	while (!uart_circ_empty(xmit) &&
		(readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size)) {
		writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		lpuart_stop_tx(&sport->port);
}

static inline void lpuart32_transmit_buffer(struct lpuart_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;
	unsigned long txcnt;

	if (sport->port.x_char) {
		lpuart32_write(&sport->port, sport->port.x_char, UARTDATA);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		return;
	}

	if (lpuart_stopped_or_empty(&sport->port)) {
		lpuart32_stop_tx(&sport->port);
		return;
	}

	txcnt = lpuart32_read(&sport->port, UARTWATER);
	txcnt = txcnt >> UARTWATER_TXCNT_OFF;
	txcnt &= UARTWATER_COUNT_MASK;
	while (!uart_circ_empty(xmit) && (txcnt < sport->txfifo_size)) {
		lpuart32_write(&sport->port, xmit->buf[xmit->tail], UARTDATA);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
		txcnt = lpuart32_read(&sport->port, UARTWATER);
		txcnt = txcnt >> UARTWATER_TXCNT_OFF;
		txcnt &= UARTWATER_COUNT_MASK;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		lpuart32_stop_tx(&sport->port);
}

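/*
 * start_tx callbacks: resume transmission, either by starting a TX DMA
 * transfer or by enabling the transmit interrupt and filling the FIFO
 * directly, depending on whether TX DMA is in use.
 */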
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) static void lpuart_start_tx(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	struct lpuart_port *sport = container_of(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	unsigned char temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	temp = readb(port->membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	if (sport->lpuart_dma_tx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		if (!lpuart_stopped_or_empty(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			lpuart_dma_tx(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			lpuart_transmit_buffer(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) static void lpuart32_start_tx(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	unsigned long temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	if (sport->lpuart_dma_tx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		if (!lpuart_stopped_or_empty(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			lpuart_dma_tx(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		temp = lpuart32_read(port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		lpuart32_write(port, temp | UARTCTRL_TIE, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		if (lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			lpuart32_transmit_buffer(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) /* return TIOCSER_TEMT when transmitter is not busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) static unsigned int lpuart_tx_empty(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	struct lpuart_port *sport = container_of(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	unsigned char sr1 = readb(port->membase + UARTSR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	unsigned char sfifo = readb(port->membase + UARTSFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	if (sport->dma_tx_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	if (sr1 & UARTSR1_TC && sfifo & UARTSFIFO_TXEMPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		return TIOCSER_TEMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) static unsigned int lpuart32_tx_empty(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	struct lpuart_port *sport = container_of(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	unsigned long stat = lpuart32_read(port, UARTSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	unsigned long sfifo = lpuart32_read(port, UARTFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (sport->dma_tx_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		return TIOCSER_TEMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
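/* Transmitter interrupt: refill the TX FIFO from the xmit circular buffer */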
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) static void lpuart_txint(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	lpuart_transmit_buffer(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
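/*
 * Receiver interrupt: drain the RX FIFO into the tty flip buffer while
 * accounting for parity, framing and overrun errors.
 */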
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static void lpuart_rxint(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	unsigned int flg, ignored = 0, overrun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	struct tty_port *port = &sport->port.state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	unsigned char rx, sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		flg = TTY_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		sport->port.icount.rx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		 * to clear the FE, OR, NF and PE flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		 * read SR1 then read DR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		sr = readb(sport->port.membase + UARTSR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		rx = readb(sport->port.membase + UARTDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		if (sr & (UARTSR1_PE | UARTSR1_OR | UARTSR1_FE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 			if (sr & UARTSR1_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 				sport->port.icount.parity++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			else if (sr & UARTSR1_FE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 				sport->port.icount.frame++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			if (sr & UARTSR1_OR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 				overrun++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			if (sr & sport->port.ignore_status_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 				if (++ignored > 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			sr &= sport->port.read_status_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			if (sr & UARTSR1_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 				flg = TTY_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			else if (sr & UARTSR1_FE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				flg = TTY_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			if (sr & UARTSR1_OR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 				flg = TTY_OVERRUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			sport->port.sysrq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		tty_insert_flip_char(port, rx, flg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	if (overrun) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		sport->port.icount.overrun += overrun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		 * Overruns cause FIFO pointers to become misaligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		 * Flushing the receive FIFO reinitializes the pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		writeb(UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		writeb(UARTSFIFO_RXOF, sport->port.membase + UARTSFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	tty_flip_buffer_push(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) static void lpuart32_txint(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	lpuart32_transmit_buffer(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) static void lpuart32_rxint(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	unsigned int flg, ignored = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	struct tty_port *port = &sport->port.state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	unsigned long rx, sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		flg = TTY_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		sport->port.icount.rx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		 * to clear the FE, OR, NF and PE flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		 * read STAT then read DATA reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		sr = lpuart32_read(&sport->port, UARTSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		rx = lpuart32_read(&sport->port, UARTDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		rx &= 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		if (sr & (UARTSTAT_PE | UARTSTAT_OR | UARTSTAT_FE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			if (sr & UARTSTAT_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 				sport->port.icount.parity++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 			else if (sr & UARTSTAT_FE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 				sport->port.icount.frame++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			if (sr & UARTSTAT_OR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 				sport->port.icount.overrun++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			if (sr & sport->port.ignore_status_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 				if (++ignored > 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			sr &= sport->port.read_status_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			if (sr & UARTSTAT_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 				flg = TTY_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			else if (sr & UARTSTAT_FE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 				flg = TTY_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			if (sr & UARTSTAT_OR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 				flg = TTY_OVERRUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			sport->port.sysrq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		tty_insert_flip_char(port, rx, flg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	tty_flip_buffer_push(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
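/*
 * Interrupt handler for the 8-bit register variant: handle break
 * detection (used for sysrq when RX DMA is active) and dispatch the
 * RX/TX service routines for the non-DMA paths.
 */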
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) static irqreturn_t lpuart_int(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct lpuart_port *sport = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	unsigned char sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	sts = readb(sport->port.membase + UARTSR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	/* SysRq: when RX DMA is in use, check for a line break via the framing error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	if (sts & UARTSR1_FE && sport->lpuart_dma_rx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		readb(sport->port.membase + UARTDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		uart_handle_break(&sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		/* a line break produces some garbage, flush it from the RX FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		writeb(UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (sts & UARTSR1_RDRF && !sport->lpuart_dma_rx_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		lpuart_rxint(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	if (sts & UARTSR1_TDRE && !sport->lpuart_dma_tx_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		lpuart_txint(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
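/*
 * Interrupt handler for the 32-bit register variant; the raw status word
 * is written back to UARTSTAT to clear the handled flags before returning.
 */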
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) static irqreturn_t lpuart32_int(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct lpuart_port *sport = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	unsigned long sts, rxcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	sts = lpuart32_read(&sport->port, UARTSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	rxcount = lpuart32_read(&sport->port, UARTWATER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	rxcount = rxcount >> UARTWATER_RXCNT_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	if ((sts & UARTSTAT_RDRF || rxcount > 0) && !sport->lpuart_dma_rx_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		lpuart32_rxint(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		lpuart32_txint(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	lpuart32_write(&sport->port, sts, UARTSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static inline void lpuart_handle_sysrq_chars(struct uart_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 					     unsigned char *p, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	while (count--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		if (*p && uart_handle_sysrq_char(port, *p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
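/*
 * Pass the bytes accumulated in the RX DMA ring buffer to the sysrq
 * handler, dealing with a possible wrap-around of the ring.
 */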
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static void lpuart_handle_sysrq(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	struct circ_buf *ring = &sport->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (ring->head < ring->tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		count = sport->rx_sgl.length - ring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		lpuart_handle_sysrq_chars(&sport->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 					  ring->buf + ring->tail, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		ring->tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (ring->head > ring->tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		count = ring->head - ring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		lpuart_handle_sysrq_chars(&sport->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 					  ring->buf + ring->tail, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		ring->tail = ring->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
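/*
 * Copy whatever the RX DMA has written into the ring buffer over to the
 * tty flip buffer. Called from the DMA completion callback and from the
 * RX timeout timer; re-arms the timer before returning.
 */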
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	struct tty_port *port = &sport->port.state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	enum dma_status dmastat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	struct dma_chan *chan = sport->dma_rx_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	struct circ_buf *ring = &sport->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (lpuart_is_32(sport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			/* Read DR to clear the error flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			lpuart32_read(&sport->port, UARTDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			if (sr & UARTSTAT_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				sport->port.icount.parity++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			else if (sr & UARTSTAT_FE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				sport->port.icount.frame++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		unsigned char sr = readb(sport->port.membase + UARTSR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		if (sr & (UARTSR1_PE | UARTSR1_FE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			unsigned char cr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			/* Disable receiver during this operation... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			cr2 = readb(sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			cr2 &= ~UARTCR2_RE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			writeb(cr2, sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			/* Read DR to clear the error flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			readb(sport->port.membase + UARTDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			if (sr & UARTSR1_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 				sport->port.icount.parity++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			else if (sr & UARTSR1_FE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 				sport->port.icount.frame++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			 * At this point the parity/framing error is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			 * cleared. However, since the DMA already read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			 * the data register and we had to read it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			 * again after reading the status register to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			 * properly clear the flags, the FIFO actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			 * underflowed... This requires clearing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			 * the FIFO...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			if (readb(sport->port.membase + UARTSFIFO) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			    UARTSFIFO_RXUF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				writeb(UARTSFIFO_RXUF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 				       sport->port.membase + UARTSFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				writeb(UARTCFIFO_RXFLUSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				       sport->port.membase + UARTCFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			cr2 |= UARTCR2_RE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			writeb(cr2, sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	async_tx_ack(sport->dma_rx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	if (dmastat == DMA_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	/* CPU claims ownership of RX DMA buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	dma_sync_sg_for_cpu(chan->device->dev, &sport->rx_sgl, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			    DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	 * ring->head points to the end of data already written by the DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	 * ring->tail points to the beginning of data to be read by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	 * framework.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	 * The current transfer size should not be larger than the dma buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	 * length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	ring->head = sport->rx_sgl.length - state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	BUG_ON(ring->head > sport->rx_sgl.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	 * Silently handle keys pressed within the sysrq timeframe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	if (sport->port.sysrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		lpuart_handle_sysrq(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	 * At this point ring->head may point to the first byte right after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	 * last byte of the dma buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	 * 0 <= ring->head <= sport->rx_sgl.length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	 * However, ring->tail must always point inside the dma buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	 * 0 <= ring->tail <= sport->rx_sgl.length - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	 * Since we use a ring buffer, we have to handle the case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	 * where head is lower than tail. In such a case, we first read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	 * tail to the end of the buffer then reset tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	if (ring->head < ring->tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		count = sport->rx_sgl.length - ring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		tty_insert_flip_string(port, ring->buf + ring->tail, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		ring->tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		sport->port.icount.rx += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	/* Finally we read data from tail to head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	if (ring->tail < ring->head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		count = ring->head - ring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		tty_insert_flip_string(port, ring->buf + ring->tail, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		/* Wrap ring->head if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		if (ring->head >= sport->rx_sgl.length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			ring->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		ring->tail = ring->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		sport->port.icount.rx += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			       DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	tty_flip_buffer_push(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static void lpuart_dma_rx_complete(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	struct lpuart_port *sport = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	lpuart_copy_rx_to_tty(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) static void lpuart_timer_func(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	lpuart_copy_rx_to_tty(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
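/*
 * Set up cyclic RX DMA: size and allocate the ring buffer based on the
 * current baud rate, map it, submit the cyclic descriptor and finally
 * enable the receiver DMA request in the hardware.
 */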
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	struct dma_slave_config dma_rx_sconfig = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	struct circ_buf *ring = &sport->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	int ret, nent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	int bits, baud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	struct tty_port *port = &sport->port.state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	struct tty_struct *tty = port->tty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	struct ktermios *termios = &tty->termios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	struct dma_chan *chan = sport->dma_rx_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	baud = tty_get_baud_rate(tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	bits = (termios->c_cflag & CSIZE) == CS7 ? 9 : 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	if (termios->c_cflag & PARENB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		bits++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	 * Calculate the size of the DMA ring buffer needed to keep latency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	 * below 10 ms at any baud rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud /  bits / 1000) * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	if (sport->rx_dma_rng_buf_len < 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		sport->rx_dma_rng_buf_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (!ring->buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	nent = dma_map_sg(chan->device->dev, &sport->rx_sgl, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			  DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (!nent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		dev_err(sport->port.dev, "DMA Rx mapping error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	dma_rx_sconfig.src_addr = lpuart_dma_datareg_addr(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	dma_rx_sconfig.src_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	ret = dmaengine_slave_config(chan, &dma_rx_sconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		dev_err(sport->port.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 				"DMA Rx slave config failed, err = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	sport->dma_rx_desc = dmaengine_prep_dma_cyclic(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 				 sg_dma_address(&sport->rx_sgl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 				 sport->rx_sgl.length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 				 sport->rx_sgl.length / 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 				 DMA_DEV_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 				 DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	if (!sport->dma_rx_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	sport->dma_rx_desc->callback_param = sport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	dma_async_issue_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (lpuart_is_32(sport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		       sport->port.membase + UARTCR5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
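/* Tear down RX DMA: stop the channel, then unmap and free the ring buffer */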
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static void lpuart_dma_rx_free(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	struct lpuart_port *sport = container_of(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 					struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	struct dma_chan *chan = sport->dma_rx_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	dmaengine_terminate_all(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	kfree(sport->rx_ring.buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	sport->rx_ring.tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	sport->rx_ring.head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	sport->dma_rx_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	sport->dma_rx_cookie = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
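/*
 * Configure hardware-assisted RS-485: transmitter-controlled RTS with a
 * selectable polarity. RTS delays and receiving during transmission are
 * not supported and are cleared from the requested flags.
 */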
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static int lpuart_config_rs485(struct uart_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			struct serial_rs485 *rs485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	struct lpuart_port *sport = container_of(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	u8 modem = readb(sport->port.membase + UARTMODEM) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	writeb(modem, sport->port.membase + UARTMODEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	/* clear unsupported configurations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	rs485->delay_rts_before_send = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	rs485->delay_rts_after_send = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	rs485->flags &= ~SER_RS485_RX_DURING_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (rs485->flags & SER_RS485_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		/* Enable auto RS-485 RTS mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		modem |= UARTMODEM_TXRTSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		 * RTS needs to be logic HIGH either during transfer _or_ after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		 * transfer; other variants are not supported by the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 				SER_RS485_RTS_AFTER_SEND)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			rs485->flags |= SER_RS485_RTS_ON_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		if (rs485->flags & SER_RS485_RTS_ON_SEND &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 				rs485->flags & SER_RS485_RTS_AFTER_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		 * The hardware defaults to RTS logic HIGH during transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		 * Switch polarity in case RTS shall be logic HIGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		 * after transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		 * Note: UART is assumed to be active high.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		if (rs485->flags & SER_RS485_RTS_ON_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			modem &= ~UARTMODEM_TXRTSPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			modem |= UARTMODEM_TXRTSPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	/* Store the new configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	sport->port.rs485 = *rs485;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	writeb(modem, sport->port.membase + UARTMODEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) static int lpuart32_config_rs485(struct uart_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			struct serial_rs485 *rs485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	struct lpuart_port *sport = container_of(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	unsigned long modem = lpuart32_read(&sport->port, UARTMODIR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 				& ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	lpuart32_write(&sport->port, modem, UARTMODIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	/* clear unsupported configurations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	rs485->delay_rts_before_send = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	rs485->delay_rts_after_send = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	rs485->flags &= ~SER_RS485_RX_DURING_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	if (rs485->flags & SER_RS485_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		/* Enable auto RS-485 RTS mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		modem |= UARTMODEM_TXRTSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		 * RTS needs to be logic HIGH either during transfer _or_ after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		 * transfer; other variants are not supported by the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 				SER_RS485_RTS_AFTER_SEND)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			rs485->flags |= SER_RS485_RTS_ON_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		if (rs485->flags & SER_RS485_RTS_ON_SEND &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 				rs485->flags & SER_RS485_RTS_AFTER_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		 * The hardware defaults to RTS logic HIGH during transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		 * Switch polarity in case RTS shall be logic HIGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		 * after transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		 * Note: UART is assumed to be active high.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		if (rs485->flags & SER_RS485_RTS_ON_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			modem &= ~UARTMODEM_TXRTSPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			modem |= UARTMODEM_TXRTSPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	/* Store the new configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	sport->port.rs485 = *rs485;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	lpuart32_write(&sport->port, modem, UARTMODIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static unsigned int lpuart_get_mctrl(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	unsigned int temp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	unsigned char reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	reg = readb(port->membase + UARTMODEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	if (reg & UARTMODEM_TXCTSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		temp |= TIOCM_CTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	if (reg & UARTMODEM_RXRTSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		temp |= TIOCM_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	return temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static unsigned int lpuart32_get_mctrl(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	unsigned char temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	struct lpuart_port *sport = container_of(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 				struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	/* Make sure RXRTSE bit is not set when RS485 is enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (!(sport->port.rs485.flags & SER_RS485_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		temp = readb(sport->port.membase + UARTMODEM) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		if (mctrl & TIOCM_RTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			temp |= UARTMODEM_RXRTSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		if (mctrl & TIOCM_CTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 			temp |= UARTMODEM_TXCTSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		writeb(temp, port->membase + UARTMODEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static void lpuart_break_ctl(struct uart_port *port, int break_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	unsigned char temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	temp = readb(port->membase + UARTCR2) & ~UARTCR2_SBK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	if (break_state != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		temp |= UARTCR2_SBK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	writeb(temp, port->membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static void lpuart32_break_ctl(struct uart_port *port, int break_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	unsigned long temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (break_state != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		temp |= UARTCTRL_SBK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	lpuart32_write(port, temp, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
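/*
 * Enable the TX/RX FIFOs, flush them and program the watermarks. The
 * transmitter, receiver and their interrupts are disabled while the FIFO
 * registers are changed, and the previous CR2 state is restored afterwards.
 */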
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static void lpuart_setup_watermark(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	unsigned char val, cr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	unsigned char cr2_saved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	cr2 = readb(sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	cr2_saved = cr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_TE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 			UARTCR2_RIE | UARTCR2_RE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	writeb(cr2, sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	val = readb(sport->port.membase + UARTPFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			sport->port.membase + UARTPFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	/* flush Tx and Rx FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			sport->port.membase + UARTCFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	/* explicitly clear RDRF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		readb(sport->port.membase + UARTDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
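	/*
	 * TX watermark 0, RX watermark 1: interrupt only once the TX FIFO
	 * has fully drained, and as soon as any RX data arrives.
	 */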
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	writeb(0, sport->port.membase + UARTTWFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	writeb(1, sport->port.membase + UARTRWFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	/* Restore cr2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	writeb(cr2_saved, sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	unsigned char cr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	lpuart_setup_watermark(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	cr2 = readb(sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	cr2 |= UARTCR2_RIE | UARTCR2_RE | UARTCR2_TE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	writeb(cr2, sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static void lpuart32_setup_watermark(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	unsigned long val, ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	unsigned long ctrl_saved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	ctrl = lpuart32_read(&sport->port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	ctrl_saved = ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	ctrl &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_TE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			UARTCTRL_RIE | UARTCTRL_RE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	lpuart32_write(&sport->port, ctrl, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	/* enable FIFO mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	val = lpuart32_read(&sport->port, UARTFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	val |= UARTFIFO_TXFE | UARTFIFO_RXFE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	lpuart32_write(&sport->port, val, UARTFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	/* set the watermark */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	lpuart32_write(&sport->port, val, UARTWATER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	/* Restore CTRL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	lpuart32_write(&sport->port, ctrl_saved, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) static void lpuart32_setup_watermark_enable(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	lpuart32_setup_watermark(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	temp = lpuart32_read(&sport->port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	lpuart32_write(&sport->port, temp, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
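/*
 * Arm the RX DMA timeout timer so that data sitting in the ring buffer is
 * pushed to the tty even when the DMA buffer never fills up.
 */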
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static void rx_dma_timer_init(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	add_timer(&sport->lpuart_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
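/*
 * Request the optional "tx" and "rx" DMA channels; if either request fails
 * the driver simply falls back to PIO for that direction.
 */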
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static void lpuart_request_dma(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	sport->dma_tx_chan = dma_request_chan(sport->port.dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	if (IS_ERR(sport->dma_tx_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		dev_dbg_once(sport->port.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 			     "DMA tx channel request failed, operating without tx DMA (%ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			     PTR_ERR(sport->dma_tx_chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		sport->dma_tx_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	sport->dma_rx_chan = dma_request_chan(sport->port.dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	if (IS_ERR(sport->dma_rx_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		dev_dbg_once(sport->port.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 			     "DMA rx channel request failed, operating without rx DMA (%ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 			     PTR_ERR(sport->dma_rx_chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		sport->dma_rx_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) static void lpuart_tx_dma_startup(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	u32 uartbaud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
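	/*
	 * DMA is skipped for console ports; console output runs in atomic
	 * context (port lock held, interrupts off), so the console stays on
	 * the PIO path instead.
	 */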
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	if (uart_console(&sport->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	if (!sport->dma_tx_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	ret = lpuart_dma_tx_request(&sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	init_waitqueue_head(&sport->dma_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	sport->lpuart_dma_tx_use = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (lpuart_is_32(sport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		uartbaud = lpuart32_read(&sport->port, UARTBAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		lpuart32_write(&sport->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 			       uartbaud | UARTBAUD_TDMAE, UARTBAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		writeb(readb(sport->port.membase + UARTCR5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		       UARTCR5_TDMAS, sport->port.membase + UARTCR5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	sport->lpuart_dma_tx_use = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static void lpuart_rx_dma_startup(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	unsigned char cr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	if (uart_console(&sport->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	if (!sport->dma_rx_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	ret = lpuart_start_rx_dma(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	/* set Rx DMA timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	if (!sport->dma_rx_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		sport->dma_rx_timeout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	sport->lpuart_dma_rx_use = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	rx_dma_timer_init(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	if (sport->port.has_sysrq && !lpuart_is_32(sport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		cr3 = readb(sport->port.membase + UARTCR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		cr3 |= UARTCR3_FEIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		writeb(cr3, sport->port.membase + UARTCR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	sport->lpuart_dma_rx_use = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static int lpuart_startup(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	unsigned char temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	/* determine FIFO size (FIFO mode is enabled in the watermark setup) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	temp = readb(sport->port.membase + UARTPFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 					    UARTPFIFO_FIFOSIZE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	sport->port.fifosize = sport->txfifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 					    UARTPFIFO_FIFOSIZE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
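	/*
	 * Request the DMA channels before taking the port lock;
	 * dma_request_chan() may sleep.
	 */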
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	lpuart_request_dma(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	lpuart_setup_watermark_enable(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	lpuart_rx_dma_startup(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	lpuart_tx_dma_startup(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static void lpuart32_configure(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	unsigned long temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	if (sport->lpuart_dma_rx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		/* RXWATER must be 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		temp = lpuart32_read(&sport->port, UARTWATER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		temp &= ~(UARTWATER_WATER_MASK << UARTWATER_RXWATER_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		lpuart32_write(&sport->port, temp, UARTWATER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	temp = lpuart32_read(&sport->port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	if (!sport->lpuart_dma_rx_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		temp |= UARTCTRL_RIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	if (!sport->lpuart_dma_tx_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		temp |= UARTCTRL_TIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	lpuart32_write(&sport->port, temp, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) static int lpuart32_startup(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	unsigned long temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	/* determine FIFO size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	temp = lpuart32_read(&sport->port, UARTFIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 					    UARTFIFO_FIFOSIZE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	sport->port.fifosize = sport->txfifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 					    UARTFIFO_FIFOSIZE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	 * The LS1021A and LS1028A have a fixed FIFO depth of 16 words.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	 * Although they support the RX/TXSIZE fields, their encoding is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	 * different. E.g. the reference manual states that 0b101 means 16 words.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	if (is_layerscape_lpuart(sport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		sport->rxfifo_size = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		sport->txfifo_size = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		sport->port.fifosize = sport->txfifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	lpuart_request_dma(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	lpuart32_setup_watermark_enable(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	lpuart_rx_dma_startup(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	lpuart_tx_dma_startup(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	lpuart32_configure(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) static void lpuart_dma_shutdown(struct lpuart_port *sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	if (sport->lpuart_dma_rx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		del_timer_sync(&sport->lpuart_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		lpuart_dma_rx_free(&sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	if (sport->lpuart_dma_tx_use) {
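		/*
		 * Wait for any in-flight TX DMA to finish; if the wait is
		 * interrupted by a signal, terminate the transfer ourselves.
		 */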
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		if (wait_event_interruptible(sport->dma_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 			!sport->dma_tx_in_progress) != false) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			sport->dma_tx_in_progress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			dmaengine_terminate_all(sport->dma_tx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	if (sport->dma_tx_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		dma_release_channel(sport->dma_tx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	if (sport->dma_rx_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		dma_release_channel(sport->dma_rx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static void lpuart_shutdown(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	unsigned char temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	/* disable Rx/Tx and interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	temp = readb(port->membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	temp &= ~(UARTCR2_TE | UARTCR2_RE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	writeb(temp, port->membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	lpuart_dma_shutdown(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static void lpuart32_shutdown(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	struct lpuart_port *sport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		container_of(port, struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	unsigned long temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	/* disable Rx/Tx and interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	temp = lpuart32_read(port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	temp &= ~(UARTCTRL_TE | UARTCTRL_RE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	lpuart32_write(port, temp, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	lpuart_dma_shutdown(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		   struct ktermios *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	unsigned char cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	unsigned int  baud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	unsigned int sbr, brfa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	cr1 = old_cr1 = readb(sport->port.membase + UARTCR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	old_cr2 = readb(sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	cr3 = readb(sport->port.membase + UARTCR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	cr4 = readb(sport->port.membase + UARTCR4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	bdh = readb(sport->port.membase + UARTBDH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	modem = readb(sport->port.membase + UARTMODEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	 * Only CS8 and CS7 are supported; CS7 requires PE (parity) to be enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	 * Supported modes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	 *  - (7,e/o,1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	 *  - (8,n,1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	 *  - (8,m/s,1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	 *  - (8,e/o,1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	while ((termios->c_cflag & CSIZE) != CS8 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		(termios->c_cflag & CSIZE) != CS7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		termios->c_cflag &= ~CSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		termios->c_cflag |= old_csize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		old_csize = CS8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	if ((termios->c_cflag & CSIZE) == CS8 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		(termios->c_cflag & CSIZE) == CS7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		cr1 = old_cr1 & ~UARTCR1_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	if (termios->c_cflag & CMSPAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		if ((termios->c_cflag & CSIZE) != CS8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 			termios->c_cflag &= ~CSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 			termios->c_cflag |= CS8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		cr1 |= UARTCR1_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	 * When auto RS-485 RTS mode is enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	 * hardware flow control needs to be disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	if (sport->port.rs485.flags & SER_RS485_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		termios->c_cflag &= ~CRTSCTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	if (termios->c_cflag & CRTSCTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		modem |= UARTMODEM_RXRTSE | UARTMODEM_TXCTSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		modem &= ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	termios->c_cflag &= ~CSTOPB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	/* parity must be enabled with CS7 to match the 8-bit frame format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	if ((termios->c_cflag & CSIZE) == CS7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		termios->c_cflag |= PARENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	if (termios->c_cflag & PARENB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		if (termios->c_cflag & CMSPAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			cr1 &= ~UARTCR1_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			if (termios->c_cflag & PARODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 				cr3 |= UARTCR3_T8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 				cr3 &= ~UARTCR3_T8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			cr1 |= UARTCR1_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			if ((termios->c_cflag & CSIZE) == CS8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 				cr1 |= UARTCR1_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			if (termios->c_cflag & PARODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 				cr1 |= UARTCR1_PT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 				cr1 &= ~UARTCR1_PT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		cr1 &= ~UARTCR1_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	/* ask the core to calculate the divisor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	 * Need to update the Ring buffer length according to the selected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	 * baud rate and restart Rx DMA path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	 * Since the timer function acquires sport->port.lock, it must be stopped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	 * before taking the same lock here; otherwise del_timer_sync() can deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	if (old && sport->lpuart_dma_rx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		del_timer_sync(&sport->lpuart_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		lpuart_dma_rx_free(&sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	sport->port.read_status_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	if (termios->c_iflag & INPCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		sport->port.read_status_mask |= UARTSR1_FE | UARTSR1_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		sport->port.read_status_mask |= UARTSR1_FE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	/* characters to ignore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	sport->port.ignore_status_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		sport->port.ignore_status_mask |= UARTSR1_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	if (termios->c_iflag & IGNBRK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		sport->port.ignore_status_mask |= UARTSR1_FE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		 * if we're ignoring parity and break indicators,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		 * ignore overruns too (for real raw support).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 			sport->port.ignore_status_mask |= UARTSR1_OR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	/* update the per-port timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	uart_update_timeout(port, termios->c_cflag, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	/* wait for the transmit engine to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	/* disable transmit and receive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 			sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
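	/*
	 * The 8-bit UART baud rate is uartclk / (16 * (SBR + BRFA/32)), with
	 * BRFA a fine adjust in 1/32 steps.  Illustrative example (assumed
	 * numbers, not from this driver): uartclk = 66 MHz and baud = 115200
	 * give SBR = 35, BRFA = 25, i.e. ~115284 baud (~0.07% error).
	 */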
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	sbr = sport->port.uartclk / (16 * baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	brfa = ((sport->port.uartclk - (16 * sbr * baud)) * 2) / baud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	bdh &= ~UARTBDH_SBR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	bdh |= (sbr >> 8) & 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	cr4 &= ~UARTCR4_BRFA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	brfa &= UARTCR4_BRFA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	writeb(cr4 | brfa, sport->port.membase + UARTCR4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	writeb(bdh, sport->port.membase + UARTBDH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	writeb(sbr & 0xFF, sport->port.membase + UARTBDL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	writeb(cr3, sport->port.membase + UARTCR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	writeb(cr1, sport->port.membase + UARTCR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	writeb(modem, sport->port.membase + UARTMODEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	/* restore control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	writeb(old_cr2, sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	if (old && sport->lpuart_dma_rx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		if (!lpuart_start_rx_dma(sport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			rx_dma_timer_init(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 			sport->lpuart_dma_rx_use = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) static void __lpuart32_serial_setbrg(struct uart_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 				     unsigned int baudrate, bool use_rx_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 				     bool use_tx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	u32 clk = port->uartclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	 * The idea is to use the best OSR (over-sampling rate) possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	 * Note, OSR is typically hard-set to 16 in other LPUART instantiations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	 * Loop through the supported OSR values and pick the one that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	 * generates the minimum baud_diff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	 * Calculation Formula:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	 *  Baud Rate = baud clock / ((OSR+1) × SBR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	 */
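	/*
	 * Illustrative example (assumed numbers): clk = 48 MHz and a 115200
	 * baud target give an ideal divisor of ~416.7; OSR * SBR = 416
	 * (e.g. OSR = 16, SBR = 26) yields ~115385 baud, about 0.16% fast.
	 */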
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	baud_diff = baudrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	osr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	sbr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	for (tmp_osr = 4; tmp_osr <= 32; tmp_osr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		/* calculate the temporary sbr value  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		tmp_sbr = (clk / (baudrate * tmp_osr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		if (tmp_sbr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 			tmp_sbr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		 * calculate the baud rate difference based on the temporary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		 * osr and sbr values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		tmp_diff = clk / (tmp_osr * tmp_sbr) - baudrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		/* select the better of sbr and sbr+1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		tmp = clk / (tmp_osr * (tmp_sbr + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		if (tmp_diff > (baudrate - tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			tmp_diff = baudrate - tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 			tmp_sbr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		if (tmp_sbr > UARTBAUD_SBR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		if (tmp_diff <= baud_diff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			baud_diff = tmp_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			osr = tmp_osr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			sbr = tmp_sbr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 			if (!baud_diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	/* warn when the achievable baud rate is outside the acceptable range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	if (baud_diff > ((baudrate / 100) * 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		dev_warn(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			 "unacceptable baud rate difference of more than 3%%\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	tmp = lpuart32_read(port, UARTBAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
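	/*
	 * Per the LPUART reference manual, oversampling ratios of 4..7
	 * require the receiver to sample on both clock edges.
	 */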
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	if ((osr > 3) && (osr < 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		tmp |= UARTBAUD_BOTHEDGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	tmp |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	tmp &= ~UARTBAUD_SBR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	tmp |= sbr & UARTBAUD_SBR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	if (!use_rx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		tmp &= ~UARTBAUD_RDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	if (!use_tx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		tmp &= ~UARTBAUD_TDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	lpuart32_write(port, tmp, UARTBAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) static void lpuart32_serial_setbrg(struct lpuart_port *sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 				   unsigned int baudrate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	__lpuart32_serial_setbrg(&sport->port, baudrate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 				 sport->lpuart_dma_rx_use,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 				 sport->lpuart_dma_tx_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		   struct ktermios *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	unsigned long ctrl, old_ctrl, modem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	unsigned int  baud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	modem = lpuart32_read(&sport->port, UARTMODIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	 * Only CS8 and CS7 are supported; CS7 requires PE (parity) to be enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	 * Supported modes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	 *  - (7,e/o,1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	 *  - (8,n,1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	 *  - (8,m/s,1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	 *  - (8,e/o,1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	while ((termios->c_cflag & CSIZE) != CS8 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		(termios->c_cflag & CSIZE) != CS7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		termios->c_cflag &= ~CSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		termios->c_cflag |= old_csize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		old_csize = CS8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	if ((termios->c_cflag & CSIZE) == CS8 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		(termios->c_cflag & CSIZE) == CS7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		ctrl = old_ctrl & ~UARTCTRL_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	if (termios->c_cflag & CMSPAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		if ((termios->c_cflag & CSIZE) != CS8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			termios->c_cflag &= ~CSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 			termios->c_cflag |= CS8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		ctrl |= UARTCTRL_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	 * When auto RS-485 RTS mode is enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	 * hardware flow control needs to be disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	if (sport->port.rs485.flags & SER_RS485_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		termios->c_cflag &= ~CRTSCTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	if (termios->c_cflag & CRTSCTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		modem |= (UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		termios->c_cflag &= ~CRTSCTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		modem &= ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	termios->c_cflag &= ~CSTOPB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	/* parity must be enabled with CS7 to match the 8-bit frame format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	if ((termios->c_cflag & CSIZE) == CS7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		termios->c_cflag |= PARENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	if ((termios->c_cflag & PARENB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		if (termios->c_cflag & CMSPAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 			ctrl &= ~UARTCTRL_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 			ctrl |= UARTCTRL_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 			ctrl |= UARTCTRL_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 			if ((termios->c_cflag & CSIZE) == CS8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 				ctrl |= UARTCTRL_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 			if (termios->c_cflag & PARODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 				ctrl |= UARTCTRL_PT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 				ctrl &= ~UARTCTRL_PT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		ctrl &= ~UARTCTRL_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	/* ask the core to calculate the divisor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	 * Need to update the Ring buffer length according to the selected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	 * baud rate and restart Rx DMA path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	 * Since the timer function acquires sport->port.lock, it must be stopped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	 * before taking the same lock here; otherwise del_timer_sync() can deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	if (old && sport->lpuart_dma_rx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		del_timer_sync(&sport->lpuart_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		lpuart_dma_rx_free(&sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	sport->port.read_status_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	if (termios->c_iflag & INPCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		sport->port.read_status_mask |= UARTSTAT_FE | UARTSTAT_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		sport->port.read_status_mask |= UARTSTAT_FE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	/* characters to ignore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	sport->port.ignore_status_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		sport->port.ignore_status_mask |= UARTSTAT_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	if (termios->c_iflag & IGNBRK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		sport->port.ignore_status_mask |= UARTSTAT_FE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		 * if we're ignoring parity and break indicators,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		 * ignore overruns too (for real raw support).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 			sport->port.ignore_status_mask |= UARTSTAT_OR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	/* update the per-port timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	uart_update_timeout(port, termios->c_cflag, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	/* wait for the transmit engine to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	/* disable transmit and receive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		       UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	lpuart32_serial_setbrg(sport, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	lpuart32_write(&sport->port, modem, UARTMODIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	/* restore the control register with the new settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	lpuart32_write(&sport->port, ctrl, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	if (old && sport->lpuart_dma_rx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		if (!lpuart_start_rx_dma(sport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 			rx_dma_timer_init(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 			sport->lpuart_dma_rx_use = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) static const char *lpuart_type(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	return "FSL_LPUART";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static void lpuart_release_port(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	/* nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) static int lpuart_request_port(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	return  0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) /* configure/autoconfigure the port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) static void lpuart_config_port(struct uart_port *port, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	if (flags & UART_CONFIG_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		port->type = PORT_LPUART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) static int lpuart_verify_port(struct uart_port *port, struct serial_struct *ser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_LPUART)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	if (port->irq != ser->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	if (ser->io_type != UPIO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	if (port->uartclk / 16 != ser->baud_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	if (port->iobase != ser->port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	if (ser->hub6 != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static const struct uart_ops lpuart_pops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	.tx_empty	= lpuart_tx_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	.set_mctrl	= lpuart_set_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	.get_mctrl	= lpuart_get_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	.stop_tx	= lpuart_stop_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	.start_tx	= lpuart_start_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	.stop_rx	= lpuart_stop_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	.break_ctl	= lpuart_break_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	.startup	= lpuart_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	.shutdown	= lpuart_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	.set_termios	= lpuart_set_termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	.type		= lpuart_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	.request_port	= lpuart_request_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	.release_port	= lpuart_release_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	.config_port	= lpuart_config_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	.verify_port	= lpuart_verify_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	.flush_buffer	= lpuart_flush_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) #if defined(CONFIG_CONSOLE_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	.poll_init	= lpuart_poll_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	.poll_get_char	= lpuart_poll_get_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	.poll_put_char	= lpuart_poll_put_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) static const struct uart_ops lpuart32_pops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	.tx_empty	= lpuart32_tx_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	.set_mctrl	= lpuart32_set_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	.get_mctrl	= lpuart32_get_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	.stop_tx	= lpuart32_stop_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	.start_tx	= lpuart32_start_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	.stop_rx	= lpuart32_stop_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	.break_ctl	= lpuart32_break_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	.startup	= lpuart32_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	.shutdown	= lpuart32_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	.set_termios	= lpuart32_set_termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	.type		= lpuart_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	.request_port	= lpuart_request_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	.release_port	= lpuart_release_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	.config_port	= lpuart_config_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	.verify_port	= lpuart_verify_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	.flush_buffer	= lpuart_flush_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) #if defined(CONFIG_CONSOLE_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	.poll_init	= lpuart32_poll_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	.poll_get_char	= lpuart32_poll_get_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	.poll_put_char	= lpuart32_poll_put_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static struct lpuart_port *lpuart_ports[UART_NR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) #ifdef CONFIG_SERIAL_FSL_LPUART_CONSOLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) static void lpuart_console_putchar(struct uart_port *port, int ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TDRE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	writeb(ch, port->membase + UARTDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) static void lpuart32_console_putchar(struct uart_port *port, int ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	lpuart32_write(port, ch, UARTDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) lpuart_console_write(struct console *co, const char *s, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	struct lpuart_port *sport = lpuart_ports[co->index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	unsigned char  old_cr2, cr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	int locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 
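	/*
	 * The port lock may already be held when printing from sysrq
	 * handling or during an oops; only try-lock in that case so the
	 * message still gets out without deadlocking.
	 */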
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	if (sport->port.sysrq || oops_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		locked = spin_trylock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	/* first save CR2 and then disable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	cr2 |= UARTCR2_TE | UARTCR2_RE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	writeb(cr2, sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	uart_console_write(&sport->port, s, count, lpuart_console_putchar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	/* wait for the transmitter to finish, then restore CR2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	writeb(old_cr2, sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	if (locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) lpuart32_console_write(struct console *co, const char *s, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	struct lpuart_port *sport = lpuart_ports[co->index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	unsigned long  old_cr, cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	int locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	if (sport->port.sysrq || oops_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		locked = spin_trylock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		spin_lock_irqsave(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	/* first save CTRL and then disable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	cr |= UARTCTRL_TE | UARTCTRL_RE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	cr &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	lpuart32_write(&sport->port, cr, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	uart_console_write(&sport->port, s, count, lpuart32_console_putchar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	/* wait for the transmitter to finish, then restore CTRL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	lpuart32_write(&sport->port, old_cr, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	if (locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		spin_unlock_irqrestore(&sport->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)  * if the port was already initialised (e.g. by a boot loader),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)  * try to determine the current setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) static void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) lpuart_console_get_options(struct lpuart_port *sport, int *baud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 			   int *parity, int *bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	unsigned char cr, bdh, bdl, brfa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	unsigned int sbr, uartclk, baud_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	cr = readb(sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	cr &= UARTCR2_TE | UARTCR2_RE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	if (!cr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	/* ok, the port was enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	cr = readb(sport->port.membase + UARTCR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	*parity = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	if (cr & UARTCR1_PE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		if (cr & UARTCR1_PT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 			*parity = 'o';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 			*parity = 'e';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	if (cr & UARTCR1_M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		*bits = 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		*bits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
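	/* reassemble the 13-bit SBR divisor from the BDH and BDL registers */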
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	bdh = readb(sport->port.membase + UARTBDH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	bdh &= UARTBDH_SBR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	bdl = readb(sport->port.membase + UARTBDL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	sbr = bdh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	sbr <<= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	sbr |= bdl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	brfa = readb(sport->port.membase + UARTCR4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	brfa &= UARTCR4_BRFA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	uartclk = lpuart_get_baud_clk_rate(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	 * baud = mod_clk / (16 * (sbr + brfa / 32)), where sbr is 13 bits wide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	baud_raw = uartclk / (16 * (sbr + brfa / 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	if (*baud != baud_raw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		dev_info(sport->port.dev, "Serial: Console lpuart rounded baud rate "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 				"from %d to %d\n", baud_raw, *baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) static void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 			   int *parity, int *bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	unsigned long cr, bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	unsigned int sbr, uartclk, baud_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	cr = lpuart32_read(&sport->port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	cr &= UARTCTRL_TE | UARTCTRL_RE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	if (!cr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	/* ok, the port was enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	cr = lpuart32_read(&sport->port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	*parity = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	if (cr & UARTCTRL_PE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		if (cr & UARTCTRL_PT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 			*parity = 'o';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 			*parity = 'e';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	if (cr & UARTCTRL_M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		*bits = 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		*bits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	bd = lpuart32_read(&sport->port, UARTBAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	bd &= UARTBAUD_SBR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	if (!bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	sbr = bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	uartclk = lpuart_get_baud_clk_rate(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	 * baud = mod_clk / (16 * sbr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	baud_raw = uartclk / (16 * sbr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	if (*baud != baud_raw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 		dev_info(sport->port.dev, "Serial: Console lpuart rounded baud rate "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 				"from %d to %d\n", baud_raw, *baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) static int __init lpuart_console_setup(struct console *co, char *options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	struct lpuart_port *sport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	int baud = 115200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	int bits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	int parity = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	int flow = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	 * check whether an invalid uart number has been specified, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	 * if so, fall back to the first port so that console setup can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	 * still proceed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	if (co->index == -1 || co->index >= ARRAY_SIZE(lpuart_ports))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		co->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	sport = lpuart_ports[co->index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	if (sport == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	if (options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		uart_parse_options(options, &baud, &parity, &bits, &flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		if (lpuart_is_32(sport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			lpuart32_console_get_options(sport, &baud, &parity, &bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 			lpuart_console_get_options(sport, &baud, &parity, &bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	if (lpuart_is_32(sport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 		lpuart32_setup_watermark(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		lpuart_setup_watermark(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	return uart_set_options(&sport->port, co, baud, parity, bits, flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) static struct uart_driver lpuart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) static struct console lpuart_console = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	.name		= DEV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	.write		= lpuart_console_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	.device		= uart_console_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	.setup		= lpuart_console_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	.flags		= CON_PRINTBUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	.index		= -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	.data		= &lpuart_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) static struct console lpuart32_console = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	.name		= DEV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	.write		= lpuart32_console_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	.device		= uart_console_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	.setup		= lpuart_console_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	.flags		= CON_PRINTBUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	.index		= -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	.data		= &lpuart_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) static void lpuart_early_write(struct console *con, const char *s, unsigned n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	struct earlycon_device *dev = con->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	uart_console_write(&dev->port, s, n, lpuart_console_putchar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) static void lpuart32_early_write(struct console *con, const char *s, unsigned n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	struct earlycon_device *dev = con->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	uart_console_write(&dev->port, s, n, lpuart32_console_putchar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) static int __init lpuart_early_console_setup(struct earlycon_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 					  const char *opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	if (!device->port.membase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	device->con->write = lpuart_early_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) static int __init lpuart32_early_console_setup(struct earlycon_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 					  const char *opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	if (!device->port.membase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
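	/* default to big-endian accessors unless little-endian MMIO was already selected */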
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	if (device->port.iotype != UPIO_MEM32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 		device->port.iotype = UPIO_MEM32BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	device->con->write = lpuart32_early_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) static int __init ls1028a_early_console_setup(struct earlycon_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 					      const char *opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	u32 cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	if (!device->port.membase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	device->port.iotype = UPIO_MEM32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	device->con->write = lpuart32_early_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	/* set the baudrate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	if (device->port.uartclk && device->baud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		__lpuart32_serial_setbrg(&device->port, device->baud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 					 false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	/* enable transmitter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	cr = lpuart32_read(&device->port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	cr |= UARTCTRL_TE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	lpuart32_write(&device->port, cr, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static int __init lpuart32_imx_early_console_setup(struct earlycon_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 						   const char *opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	if (!device->port.membase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	device->port.iotype = UPIO_MEM32;
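	/* i.MX variants place the LPUART registers at an offset from the mapped base */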
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	device->port.membase += IMX_REG_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	device->con->write = lpuart32_early_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) #define LPUART_CONSOLE	(&lpuart_console)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) #define LPUART32_CONSOLE	(&lpuart32_console)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) #define LPUART_CONSOLE	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) #define LPUART32_CONSOLE	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) #endif /* CONFIG_SERIAL_FSL_LPUART_CONSOLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) static struct uart_driver lpuart_reg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	.driver_name	= DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	.dev_name	= DEV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	.nr		= ARRAY_SIZE(lpuart_ports),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	.cons		= LPUART_CONSOLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) static int lpuart_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	const struct of_device_id *of_id = of_match_device(lpuart_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 							   &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	const struct lpuart_soc_data *sdata = of_id->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	struct lpuart_port *sport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	if (!sport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	if (IS_ERR(sport->port.membase))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		return PTR_ERR(sport->port.membase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	sport->port.membase += sdata->reg_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	sport->port.mapbase = res->start + sdata->reg_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	sport->port.dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	sport->port.type = PORT_LPUART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	sport->devtype = sdata->devtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	ret = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	sport->port.irq = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	sport->port.iotype = sdata->iotype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	if (lpuart_is_32(sport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		sport->port.ops = &lpuart32_pops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		sport->port.ops = &lpuart_pops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_FSL_LPUART_CONSOLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	sport->port.flags = UPF_BOOT_AUTOCONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	if (lpuart_is_32(sport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		sport->port.rs485_config = lpuart32_config_rs485;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		sport->port.rs485_config = lpuart_config_rs485;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	if (IS_ERR(sport->ipg_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		ret = PTR_ERR(sport->ipg_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		dev_err(&pdev->dev, "failed to get uart ipg clk: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	sport->baud_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	if (is_imx8qxp_lpuart(sport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 		sport->baud_clk = devm_clk_get(&pdev->dev, "baud");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		if (IS_ERR(sport->baud_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 			ret = PTR_ERR(sport->baud_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 			dev_err(&pdev->dev, "failed to get uart baud clk: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	ret = of_alias_get_id(np, "serial");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 			dev_err(&pdev->dev, "no free port line available, failed to add device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		sport->id_allocated = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	}
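	/* ret now holds the port line number, taken from the DT alias or the IDA */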
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	if (ret >= ARRAY_SIZE(lpuart_ports)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 		dev_err(&pdev->dev, "serial%d out of range\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 		goto failed_out_of_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	sport->port.line = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	ret = lpuart_enable_clks(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 		goto failed_clock_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	lpuart_ports[sport->port.line] = sport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	platform_set_drvdata(pdev, &sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	if (lpuart_is_32(sport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 		lpuart_reg.cons = LPUART32_CONSOLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 					DRIVER_NAME, sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 		lpuart_reg.cons = LPUART_CONSOLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 		ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 					DRIVER_NAME, sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		goto failed_irq_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	ret = uart_add_one_port(&lpuart_reg, &sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		goto failed_attach_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	ret = uart_get_rs485_mode(&sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 		goto failed_get_rs485;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	if (sport->port.rs485.delay_rts_before_send ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	    sport->port.rs485.delay_rts_after_send)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 
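	/* apply the RS-485 settings retrieved by uart_get_rs485_mode() */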
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	sport->port.rs485_config(&sport->port, &sport->port.rs485);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) failed_get_rs485:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) failed_attach_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) failed_irq_request:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	lpuart_disable_clks(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) failed_clock_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) failed_out_of_range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	if (sport->id_allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) static int lpuart_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	struct lpuart_port *sport = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	uart_remove_one_port(&lpuart_reg, &sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	if (sport->id_allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 		ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	lpuart_disable_clks(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	if (sport->dma_tx_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		dma_release_channel(sport->dma_tx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	if (sport->dma_rx_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 		dma_release_channel(sport->dma_rx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) static int __maybe_unused lpuart_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	struct lpuart_port *sport = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	unsigned long temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	bool irq_wake;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	if (lpuart_is_32(sport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		/* disable the transmitter and Tx interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 		temp = lpuart32_read(&sport->port, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		lpuart32_write(&sport->port, temp, UARTCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		/* disable the transmitter and Tx interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 		temp = readb(sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		writeb(temp, sport->port.membase + UARTCR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	uart_suspend_port(&lpuart_reg, &sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	/* uart_suspend_port() might set wakeup flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	if (sport->lpuart_dma_rx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		 * During suspend the EDMA driver forcefully releases any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		 * non-idle DMA channels. If port wakeup is enabled, the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 		 * is a console port, or 'no_console_suspend' is set, the Rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		 * DMA cannot resume as expected, so gracefully release the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		 * Rx DMA path before suspend and restart it on resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		if (irq_wake) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 			del_timer_sync(&sport->lpuart_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 			lpuart_dma_rx_free(&sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		/* Disable Rx DMA to use UART port as wakeup source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		if (lpuart_is_32(sport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 			temp = lpuart32_read(&sport->port, UARTBAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 			lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 				       UARTBAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 			writeb(readb(sport->port.membase + UARTCR5) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 			       ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	if (sport->lpuart_dma_tx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		sport->dma_tx_in_progress = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		dmaengine_terminate_all(sport->dma_tx_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
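	/* keep the clocks running when the port is a wakeup source */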
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	if (sport->port.suspended && !irq_wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		lpuart_disable_clks(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) static int __maybe_unused lpuart_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	struct lpuart_port *sport = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	bool irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	if (sport->port.suspended && !irq_wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		lpuart_enable_clks(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	if (lpuart_is_32(sport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		lpuart32_setup_watermark_enable(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 		lpuart_setup_watermark_enable(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	if (sport->lpuart_dma_rx_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		if (irq_wake) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 			if (!lpuart_start_rx_dma(sport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 				rx_dma_timer_init(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 				sport->lpuart_dma_rx_use = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	lpuart_tx_dma_startup(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	if (lpuart_is_32(sport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		lpuart32_configure(sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	uart_resume_port(&lpuart_reg, &sport->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) static SIMPLE_DEV_PM_OPS(lpuart_pm_ops, lpuart_suspend, lpuart_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) static struct platform_driver lpuart_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	.probe		= lpuart_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 	.remove		= lpuart_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 		.name	= "fsl-lpuart",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		.of_match_table = lpuart_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 		.pm	= &lpuart_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) static int __init lpuart_serial_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	int ret = uart_register_driver(&lpuart_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	ret = platform_driver_register(&lpuart_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 		uart_unregister_driver(&lpuart_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static void __exit lpuart_serial_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	ida_destroy(&fsl_lpuart_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	platform_driver_unregister(&lpuart_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	uart_unregister_driver(&lpuart_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) module_init(lpuart_serial_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) module_exit(lpuart_serial_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) MODULE_DESCRIPTION("Freescale lpuart serial port driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) MODULE_LICENSE("GPL v2");