Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * serial_tegra.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * High-speed serial driver for NVIDIA Tegra SoCs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Copyright (c) 2012-2019, NVIDIA CORPORATION.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * Author: Laxman Dewangan <ldewangan@nvidia.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/dmapool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/serial.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/serial_8250.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/serial_core.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/serial_reg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/termios.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/tty.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/tty_flip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #define TEGRA_UART_TYPE				"TEGRA_UART"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #define TX_EMPTY_STATUS				(UART_LSR_TEMT | UART_LSR_THRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #define BYTES_TO_ALIGN(x)			((unsigned long)(x) & 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #define TEGRA_UART_RX_DMA_BUFFER_SIZE		4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #define TEGRA_UART_LSR_TXFIFO_FULL		0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #define TEGRA_UART_IER_EORD			0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #define TEGRA_UART_MCR_RTS_EN			0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #define TEGRA_UART_MCR_CTS_EN			0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #define TEGRA_UART_LSR_ANY			(UART_LSR_OE | UART_LSR_BI | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 						UART_LSR_PE | UART_LSR_FE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #define TEGRA_UART_IRDA_CSR			0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #define TEGRA_UART_SIR_ENABLED			0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #define TEGRA_UART_TX_PIO			1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #define TEGRA_UART_TX_DMA			2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #define TEGRA_UART_MIN_DMA			16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #define TEGRA_UART_FIFO_SIZE			32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57)  * Tx fifo trigger level setting in tegra uart is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58)  * reverse way then conventional uart.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #define TEGRA_UART_TX_TRIG_16B			0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #define TEGRA_UART_TX_TRIG_8B			0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #define TEGRA_UART_TX_TRIG_4B			0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #define TEGRA_UART_TX_TRIG_1B			0x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) #define TEGRA_UART_MAXIMUM			8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) /* Default UART setting when started: 115200 no parity, stop, 8 data bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #define TEGRA_UART_DEFAULT_BAUD			115200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #define TEGRA_UART_DEFAULT_LSR			UART_LCR_WLEN8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) /* Tx transfer mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) #define TEGRA_TX_PIO				1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #define TEGRA_TX_DMA				2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #define TEGRA_UART_FCR_IIR_FIFO_EN		0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
/**
 * tegra_uart_chip_data: SOC specific data.
 *
 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow_tx fifo reset with fifo mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: Clock source support the clock divider.
 * @fifo_mode_enable_status: IIR reports FIFO-enable state, so the FIFO
 *			reset path can poll tegra_uart_wait_fifo_mode_enabled().
 * @uart_max_port: Maximum number of UART ports (not referenced in this
 *			chunk; presumably consumed by probe — confirm).
 * @max_dma_burst_bytes: Maximum DMA burst in bytes (assumed; confirm
 *			against the DMA slave configuration elsewhere).
 * @error_tolerance_low_range: Lowest tolerated baud-rate deviation, in
 *			percent (scaled by 100 in tegra_check_rate_in_range()).
 * @error_tolerance_high_range: Highest tolerated baud-rate deviation, in
 *			percent.
 */
struct tegra_uart_chip_data {
	bool	tx_fifo_full_status;
	bool	allow_txfifo_reset_fifo_mode;
	bool	support_clk_src_div;
	bool	fifo_mode_enable_status;
	int	uart_max_port;
	int	max_dma_burst_bytes;
	int	error_tolerance_low_range;
	int	error_tolerance_high_range;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
/*
 * One adjustable baud-rate range: for bauds in [lower, upper] the clock
 * rate is biased by @tolerance, expressed in 1/100 of a percent (see
 * tegra_get_tolerance_rate(): rate + rate * tolerance / 10000).
 */
struct tegra_baud_tolerance {
	u32 lower_range_baud;	/* inclusive lower bound of the baud range */
	u32 upper_range_baud;	/* inclusive upper bound of the baud range */
	s32 tolerance;		/* signed adjustment, units of 0.01% */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 
/* Per-port driver state wrapping the serial-core uart_port. */
struct tegra_uart_port {
	/* Must stay embedded (not a pointer): to_tegra_uport() uses container_of(). */
	struct uart_port			uport;
	const struct tegra_uart_chip_data	*cdata;	/* SoC-specific parameters */

	struct clk				*uart_clk;	/* baud source clock */
	struct reset_control			*rst;
	unsigned int				current_baud;	/* 0 until tegra_set_baudrate() runs */

	/* Register shadow: cached last-written values of write-mostly registers */
	unsigned long				fcr_shadow;
	unsigned long				mcr_shadow;
	unsigned long				lcr_shadow;
	unsigned long				ier_shadow;
	bool					rts_active;	/* caller requested TIOCM_RTS */

	int					tx_in_progress;
	unsigned int				tx_bytes;

	/* When set, modem-status lines are reported (see tegra_uart_get_mctrl()) */
	bool					enable_modem_interrupt;

	bool					rx_timeout;
	int					rx_in_progress;
	int					symb_bit;	/* bits per symbol; used for symbol-time delays */

	/* DMA state; rx/tx buffers and in-flight descriptors/cookies.
	 * NOTE(review): most of these are consumed by code outside this
	 * chunk — descriptions assumed from names, confirm against the
	 * DMA setup/teardown paths. */
	struct dma_chan				*rx_dma_chan;
	struct dma_chan				*tx_dma_chan;
	dma_addr_t				rx_dma_buf_phys;
	dma_addr_t				tx_dma_buf_phys;
	unsigned char				*rx_dma_buf_virt;
	unsigned char				*tx_dma_buf_virt;
	struct dma_async_tx_descriptor		*tx_dma_desc;
	struct dma_async_tx_descriptor		*rx_dma_desc;
	dma_cookie_t				tx_cookie;
	dma_cookie_t				rx_cookie;
	unsigned int				tx_bytes_requested;
	unsigned int				rx_bytes_requested;
	struct tegra_baud_tolerance		*baud_tolerance;	/* array, length below */
	int					n_adjustable_baud_rates;
	int					required_rate;		/* baud * 16, requested from clk */
	int					configured_rate;	/* actual rate clk granted */
	bool					use_rx_pio;
	bool					use_tx_pio;
	bool					rx_dma_active;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 					bool dma_to_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 		unsigned long reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	return readl(tup->uport.membase + (reg << tup->uport.regshift));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	unsigned long reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	writel(val, tup->uport.membase + (reg << tup->uport.regshift));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	return container_of(u, struct tegra_uart_port, uport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	 * RI - Ring detector is active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	 * CD/DCD/CAR - Carrier detect is always active. For some reason
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	 *	linux has different names for carrier detect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	 * DSR - Data Set ready is active as the hardware doesn't support it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	 *	Don't know if the linux support this yet?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	 * CTS - Clear to send. Always set to active, as the hardware handles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	 *	CTS automatically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	if (tup->enable_modem_interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	return TIOCM_CTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) static void set_rts(struct tegra_uart_port *tup, bool active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	unsigned long mcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	mcr = tup->mcr_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	if (active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 		mcr |= TEGRA_UART_MCR_RTS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 		mcr &= ~TEGRA_UART_MCR_RTS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	if (mcr != tup->mcr_shadow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 		tegra_uart_write(tup, mcr, UART_MCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 		tup->mcr_shadow = mcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) static void set_dtr(struct tegra_uart_port *tup, bool active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	unsigned long mcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	mcr = tup->mcr_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	if (active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 		mcr |= UART_MCR_DTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		mcr &= ~UART_MCR_DTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	if (mcr != tup->mcr_shadow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		tegra_uart_write(tup, mcr, UART_MCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 		tup->mcr_shadow = mcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) static void set_loopbk(struct tegra_uart_port *tup, bool active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	unsigned long mcr = tup->mcr_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	if (active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 		mcr |= UART_MCR_LOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 		mcr &= ~UART_MCR_LOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	if (mcr != tup->mcr_shadow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		tegra_uart_write(tup, mcr, UART_MCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		tup->mcr_shadow = mcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	int enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	tup->rts_active = !!(mctrl & TIOCM_RTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	set_rts(tup, tup->rts_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	enable = !!(mctrl & TIOCM_DTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	set_dtr(tup, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	enable = !!(mctrl & TIOCM_LOOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	set_loopbk(tup, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	unsigned long lcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	lcr = tup->lcr_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	if (break_ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 		lcr |= UART_LCR_SBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 		lcr &= ~UART_LCR_SBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	tegra_uart_write(tup, lcr, UART_LCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	tup->lcr_shadow = lcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262)  * tegra_uart_wait_cycle_time: Wait for N UART clock periods
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264)  * @tup:	Tegra serial port data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265)  * @cycles:	Number of clock periods to wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267)  * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268)  * clock speed is 16X the current baud rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 				       unsigned int cycles)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	if (tup->current_baud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 		udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) /* Wait for a symbol-time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 		unsigned int syms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	if (tup->current_baud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 			tup->current_baud));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	unsigned long iir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	unsigned int tmout = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 		iir = tegra_uart_read(tup, UART_IIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 		if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	} while (--tmout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 
/*
 * Reset the RX and/or TX FIFOs selected by @fcr_bits (UART_FCR_CLEAR_RCVR /
 * UART_FCR_CLEAR_XMIT). The FCR write sequence below is order-critical and
 * encodes per-SoC hardware workarounds — do not reorder.
 */
static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;
	unsigned int lsr, tmout = 10000;

	/* Deassert RTS so the remote stops sending while the FIFO resets. */
	if (tup->rts_active)
		set_rts(tup, false);

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		/* Single write: clear bits may be set while FIFO mode is on. */
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		/*
		 * Tegra30-style parts: FIFO mode must be disabled first,
		 * then cleared, then re-enabled (with a settle delay).
		 */
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		/* Some SoCs report FIFO-enable in IIR; wait for confirmation. */
		if (tup->cdata->fifo_mode_enable_status)
			tegra_uart_wait_fifo_mode_enabled(tup);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/*
	 * For all tegra devices (up to t210), there is a hardware issue that
	 * requires software to wait for 32 UART clock periods for the flush
	 * to propagate, otherwise data could be lost.
	 */
	tegra_uart_wait_cycle_time(tup, 32);

	/* Wait (up to ~10 ms) for TX shifter empty and RX data drained. */
	do {
		lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
			break;
		udelay(1);
	} while (--tmout);

	/* Restore the caller-requested RTS state. */
	if (tup->rts_active)
		set_rts(tup, true);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 				     unsigned int baud, long rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 		if (baud >= tup->baud_tolerance[i].lower_range_baud &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 		    baud <= tup->baud_tolerance[i].upper_range_baud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 			return (rate + (rate *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 				tup->baud_tolerance[i].tolerance) / 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	return rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	long diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 		/ tup->required_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	    diff > (tup->cdata->error_tolerance_high_range * 100)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 		dev_err(tup->uport.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 			"configured baud rate is out of range by %ld", diff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
/*
 * Program the UART for @baud. On SoCs with a clock-source divider the
 * source clock itself is retuned (divisor stays 1); otherwise a classic
 * 16550 divisor is computed from the fixed clock. The DLAB sequence below
 * is order-critical and runs under the port lock.
 *
 * Returns 0 on success or a negative errno from clk/tolerance checks.
 *
 * NOTE(review): in the non-divider path, baud == 0 would divide by zero in
 * DIV_ROUND_CLOSEST — presumably callers guarantee a nonzero baud via
 * uart_get_baud_rate(); confirm at the call sites.
 */
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	unsigned long flags;
	int ret;

	/* Already at the requested rate; nothing to do. */
	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		/* UART clock must run at 16x the baud rate. */
		rate = baud * 16;
		tup->required_rate = rate;

		if (tup->n_adjustable_baud_rates)
			rate = tegra_get_tolerance_rate(tup, baud, rate);

		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		tup->configured_rate = clk_get_rate(tup->uart_clk);
		divisor = 1;
		/* Reject rates the clock tree could not hit closely enough. */
		ret = tegra_check_rate_in_range(tup);
		if (ret < 0)
			return ret;
	} else {
		/* Fixed source clock: use the standard 16550 divisor. */
		rate = clk_get_rate(tup->uart_clk);
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Open the divisor latch (DLAB=1): TX/IER now alias DLL/DLM. */
	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);

	/* Close the latch again before normal register use resumes. */
	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 			unsigned long lsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	char flag = TTY_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		if (lsr & UART_LSR_OE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 			/* Overrrun error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 			flag = TTY_OVERRUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 			tup->uport.icount.overrun++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 			dev_dbg(tup->uport.dev, "Got overrun errors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		} else if (lsr & UART_LSR_PE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 			/* Parity error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 			flag = TTY_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 			tup->uport.icount.parity++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 			dev_dbg(tup->uport.dev, "Got Parity errors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		} else if (lsr & UART_LSR_FE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 			flag = TTY_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 			tup->uport.icount.frame++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			dev_dbg(tup->uport.dev, "Got frame errors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		} else if (lsr & UART_LSR_BI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 			 * Break error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 			 * If FIFO read error without any data, reset Rx FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 			if (tup->uport.ignore_status_mask & UART_LSR_BI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 				return TTY_BREAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 			flag = TTY_BREAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 			tup->uport.icount.brk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 			dev_dbg(tup->uport.dev, "Got Break\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	return flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) static int tegra_uart_request_port(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) static void tegra_uart_release_port(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	/* Nothing to do here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	struct circ_buf *xmit = &tup->uport.state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	for (i = 0; i < max_bytes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		BUG_ON(uart_circ_empty(xmit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		if (tup->cdata->tx_fifo_full_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 			unsigned long lsr = tegra_uart_read(tup, UART_LSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 			if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		tup->uport.icount.tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		unsigned int bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	if (bytes > TEGRA_UART_MIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		bytes = TEGRA_UART_MIN_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	tup->tx_in_progress = TEGRA_UART_TX_PIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	tup->tx_bytes = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	tup->ier_shadow |= UART_IER_THRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) static void tegra_uart_tx_dma_complete(void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	struct tegra_uart_port *tup = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	struct circ_buf *xmit = &tup->uport.state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	count = tup->tx_bytes_requested - state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	async_tx_ack(tup->tx_dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	spin_lock_irqsave(&tup->uport.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	tup->tx_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		uart_write_wakeup(&tup->uport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	tegra_uart_start_next_tx(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	spin_unlock_irqrestore(&tup->uport.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		unsigned long count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	struct circ_buf *xmit = &tup->uport.state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	dma_addr_t tx_phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	tup->tx_bytes = count & ~(0xF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	dma_sync_single_for_device(tup->uport.dev, tx_phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 				   tup->tx_bytes, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 				DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	if (!tup->tx_dma_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	tup->tx_dma_desc->callback_param = tup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	tup->tx_in_progress = TEGRA_UART_TX_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	tup->tx_bytes_requested = tup->tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	dma_async_issue_pending(tup->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	unsigned long tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	unsigned long count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	struct circ_buf *xmit = &tup->uport.state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	if (!tup->current_baud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	tail = (unsigned long)&xmit->buf[xmit->tail];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		tegra_uart_start_pio_tx(tup, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	else if (BYTES_TO_ALIGN(tail) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		tegra_uart_start_tx_dma(tup, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) /* Called by serial core driver with u->lock taken. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) static void tegra_uart_start_tx(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	struct circ_buf *xmit = &u->state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		tegra_uart_start_next_tx(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) static unsigned int tegra_uart_tx_empty(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	unsigned int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	spin_lock_irqsave(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	if (!tup->tx_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 			ret = TIOCSER_TEMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	spin_unlock_irqrestore(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) static void tegra_uart_stop_tx(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	struct circ_buf *xmit = &tup->uport.state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	dmaengine_terminate_all(tup->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	count = tup->tx_bytes_requested - state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	async_tx_ack(tup->tx_dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	tup->tx_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	struct circ_buf *xmit = &tup->uport.state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	tup->tx_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		uart_write_wakeup(&tup->uport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	tegra_uart_start_next_tx(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		struct tty_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		char flag = TTY_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		unsigned long lsr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		unsigned char ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		lsr = tegra_uart_read(tup, UART_LSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		if (!(lsr & UART_LSR_DR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		flag = tegra_uart_decode_rx_error(tup, lsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		if (flag != TTY_NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		ch = (unsigned char) tegra_uart_read(tup, UART_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		tup->uport.icount.rx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		if (uart_handle_sysrq_char(&tup->uport, ch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		if (tup->uport.ignore_status_mask & UART_LSR_DR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		tty_insert_flip_char(port, ch, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	} while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 				      struct tty_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 				      unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	int copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	/* If count is zero, then there is no data to be copied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	tup->uport.icount.rx += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	if (tup->uport.ignore_status_mask & UART_LSR_DR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 				count, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	copied = tty_insert_flip_string(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			((unsigned char *)(tup->rx_dma_buf_virt)), count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	if (copied != count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 				   count, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) static void do_handle_rx_pio(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	struct tty_port *port = &tup->uport.state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	tegra_uart_handle_rx_pio(tup, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (tty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		tty_flip_buffer_push(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		tty_kref_put(tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 				      unsigned int residue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	struct tty_port *port = &tup->uport.state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	async_tx_ack(tup->rx_dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	count = tup->rx_bytes_requested - residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	/* If we are here, DMA is stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	tegra_uart_copy_rx_to_tty(tup, port, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	do_handle_rx_pio(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) static void tegra_uart_rx_dma_complete(void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	struct tegra_uart_port *tup = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	struct uart_port *u = &tup->uport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	spin_lock_irqsave(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (status == DMA_IN_PROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	/* Deactivate flow control to stop sender */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	if (tup->rts_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		set_rts(tup, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	tup->rx_dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	tegra_uart_rx_buffer_push(tup, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	tegra_uart_start_rx_dma(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	/* Activate flow control to start transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	if (tup->rts_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		set_rts(tup, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	spin_unlock_irqrestore(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (!tup->rx_dma_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		do_handle_rx_pio(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	dmaengine_terminate_all(tup->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	tegra_uart_rx_buffer_push(tup, state.residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	tup->rx_dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	/* Deactivate flow control to stop sender */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (tup->rts_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		set_rts(tup, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	tegra_uart_terminate_rx_dma(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (tup->rts_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		set_rts(tup, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	if (tup->rx_dma_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 				DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	if (!tup->rx_dma_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	tup->rx_dma_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	tup->rx_dma_desc->callback_param = tup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	tup->rx_bytes_requested = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	dma_async_issue_pending(tup->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	unsigned long msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	msr = tegra_uart_read(tup, UART_MSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	if (!(msr & UART_MSR_ANY_DELTA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	if (msr & UART_MSR_TERI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		tup->uport.icount.rng++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (msr & UART_MSR_DDSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		tup->uport.icount.dsr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	/* We may only get DDCD when HW init and reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (msr & UART_MSR_DDCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	/* Will start/stop_tx accordingly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	if (msr & UART_MSR_DCTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) static irqreturn_t tegra_uart_isr(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	struct tegra_uart_port *tup = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	struct uart_port *u = &tup->uport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	unsigned long iir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	unsigned long ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	bool is_rx_start = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	bool is_rx_int = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	spin_lock_irqsave(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		iir = tegra_uart_read(tup, UART_IIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		if (iir & UART_IIR_NO_INT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			if (!tup->use_rx_pio && is_rx_int) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 				tegra_uart_handle_rx_dma(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 				if (tup->rx_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 					ier = tup->ier_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 						TEGRA_UART_IER_EORD | UART_IER_RDI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 					tup->ier_shadow = ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 					tegra_uart_write(tup, ier, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			} else if (is_rx_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 				tegra_uart_start_rx_dma(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			spin_unlock_irqrestore(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		switch ((iir >> 1) & 0x7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		case 0: /* Modem signal change interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			tegra_uart_handle_modem_signal_change(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		case 1: /* Transmit interrupt only triggered when using PIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			tup->ier_shadow &= ~UART_IER_THRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 			tegra_uart_handle_tx_pio(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		case 4: /* End of data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		case 6: /* Rx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			if (!tup->use_rx_pio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				is_rx_int = tup->rx_in_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 				/* Disable Rx interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 				ier = tup->ier_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				tup->ier_shadow = ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				tegra_uart_write(tup, ier, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		case 2: /* Receive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			if (!tup->use_rx_pio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 				is_rx_start = tup->rx_in_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 				tup->ier_shadow  &= ~UART_IER_RDI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 				tegra_uart_write(tup, tup->ier_shadow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 						 UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 				do_handle_rx_pio(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		case 3: /* Receive error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			tegra_uart_decode_rx_error(tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 					tegra_uart_read(tup, UART_LSR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		case 5: /* break nothing to handle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		case 7: /* break nothing to handle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) static void tegra_uart_stop_rx(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	struct tty_port *port = &tup->uport.state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	unsigned long ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (tup->rts_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		set_rts(tup, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	if (!tup->rx_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	tegra_uart_wait_sym_time(tup, 1); /* wait one character interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	ier = tup->ier_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 					TEGRA_UART_IER_EORD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	tup->ier_shadow = ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	tegra_uart_write(tup, ier, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	tup->rx_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	if (!tup->use_rx_pio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		tegra_uart_terminate_rx_dma(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		tegra_uart_handle_rx_pio(tup, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	unsigned long wait_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	unsigned long lsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	unsigned long msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	unsigned long mcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	/* Disable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	tegra_uart_write(tup, 0, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	lsr = tegra_uart_read(tup, UART_LSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		msr = tegra_uart_read(tup, UART_MSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		mcr = tegra_uart_read(tup, UART_MCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			dev_err(tup->uport.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				"Tx Fifo not empty, CTS disabled, waiting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		/* Wait for Tx fifo to be empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			wait_time = min(fifo_empty_time, 100lu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			udelay(wait_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			fifo_empty_time -= wait_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			if (!fifo_empty_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 				msr = tegra_uart_read(tup, UART_MSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 				mcr = tegra_uart_read(tup, UART_MCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 					(msr & UART_MSR_CTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 					dev_err(tup->uport.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 						"Slave not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			lsr = tegra_uart_read(tup, UART_LSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	spin_lock_irqsave(&tup->uport.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	/* Reset the Rx and Tx FIFOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	tup->current_baud = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	spin_unlock_irqrestore(&tup->uport.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	tup->rx_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	tup->tx_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (!tup->use_rx_pio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		tegra_uart_dma_channel_free(tup, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (!tup->use_tx_pio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		tegra_uart_dma_channel_free(tup, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	clk_disable_unprepare(tup->uart_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) static int tegra_uart_hw_init(struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	tup->fcr_shadow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	tup->mcr_shadow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	tup->lcr_shadow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	tup->ier_shadow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	tup->current_baud = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	clk_prepare_enable(tup->uart_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	/* Reset the UART controller to clear all previous status.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	reset_control_assert(tup->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	reset_control_deassert(tup->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	tup->rx_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	tup->tx_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 * Set the trigger level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	 * For PIO mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	 * For receive, this will interrupt the CPU after that many number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 * bytes are received, for the remaining bytes the receive timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 * interrupt is received. Rx high watermark is set to 4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * For transmit, if the trasnmit interrupt is enabled, this will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 * interrupt the CPU when the number of entries in the FIFO reaches the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 * low watermark. Tx low watermark is set to 16 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * For DMA mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	 * Set the Tx trigger to 16. This should match the DMA burst size that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	 * programmed in the DMA registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	if (tup->use_rx_pio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		tup->fcr_shadow |= UART_FCR_R_TRIG_11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		if (tup->cdata->max_dma_burst_bytes == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			tup->fcr_shadow |= UART_FCR_R_TRIG_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			tup->fcr_shadow |= UART_FCR_R_TRIG_01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	/* Dummy read to ensure the write is posted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	tegra_uart_read(tup, UART_SCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (tup->cdata->fifo_mode_enable_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		ret = tegra_uart_wait_fifo_mode_enabled(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			dev_err(tup->uport.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 				"Failed to enable FIFO mode: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		 * For all tegra devices (up to t210), there is a hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		 * issue that requires software to wait for 3 UART clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		 * periods after enabling the TX fifo, otherwise data could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		 * be lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		tegra_uart_wait_cycle_time(tup, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	 * Initialize the UART with default configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	 * (115200, N, 8, 1) so that the receive DMA buffer may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	 * enqueued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		dev_err(tup->uport.dev, "Failed to set baud rate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (!tup->use_rx_pio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		tup->fcr_shadow |= UART_FCR_DMA_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	tup->rx_in_progress = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	 * Enable IE_RXS for the receive status interrupts like line errros.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	 * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	 * the DATA is sitting in the FIFO and couldn't be transferred to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	 * DMA as the DMA size alignment (4 bytes) is not met. EORD will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	 * triggered when there is a pause of the incomming data stream for 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	 * characters long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	 * For pauses in the data which is not aligned to 4 bytes, we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	 * then the EORD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	 * If using DMA mode, enable EORD interrupt to notify about RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	 * completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	if (!tup->use_rx_pio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		tup->ier_shadow |= TEGRA_UART_IER_EORD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		bool dma_to_memory)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	if (dma_to_memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		dmaengine_terminate_all(tup->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		dma_release_channel(tup->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 				tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		tup->rx_dma_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		tup->rx_dma_buf_phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		tup->rx_dma_buf_virt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		dmaengine_terminate_all(tup->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		dma_release_channel(tup->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			UART_XMIT_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		tup->tx_dma_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		tup->tx_dma_buf_phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		tup->tx_dma_buf_virt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			bool dma_to_memory)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	struct dma_chan *dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	unsigned char *dma_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	dma_addr_t dma_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	struct dma_slave_config dma_sconfig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	if (IS_ERR(dma_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		ret = PTR_ERR(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		dev_err(tup->uport.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			"DMA channel alloc failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (dma_to_memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		dma_buf = dma_alloc_coherent(tup->uport.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				TEGRA_UART_RX_DMA_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 				 &dma_phys, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		if (!dma_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			dev_err(tup->uport.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 				"Not able to allocate the dma buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			dma_release_channel(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		dma_sync_single_for_device(tup->uport.dev, dma_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 					   TEGRA_UART_RX_DMA_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 					   DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		dma_sconfig.src_addr = tup->uport.mapbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		tup->rx_dma_chan = dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		tup->rx_dma_buf_virt = dma_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		tup->rx_dma_buf_phys = dma_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		dma_phys = dma_map_single(tup->uport.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		if (dma_mapping_error(tup->uport.dev, dma_phys)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			dev_err(tup->uport.dev, "dma_map_single tx failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			dma_release_channel(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		dma_buf = tup->uport.state->xmit.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		dma_sconfig.dst_addr = tup->uport.mapbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		dma_sconfig.dst_maxburst = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		tup->tx_dma_chan = dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		tup->tx_dma_buf_virt = dma_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		tup->tx_dma_buf_phys = dma_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		dev_err(tup->uport.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			"Dma slave config failed, err = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		tegra_uart_dma_channel_free(tup, dma_to_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) static int tegra_uart_startup(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	if (!tup->use_tx_pio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		ret = tegra_uart_dma_channel_allocate(tup, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	if (!tup->use_rx_pio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		ret = tegra_uart_dma_channel_allocate(tup, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 			goto fail_rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	ret = tegra_uart_hw_init(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		goto fail_hw_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	ret = request_irq(u->irq, tegra_uart_isr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 				dev_name(u->dev), tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		goto fail_hw_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) fail_hw_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	if (!tup->use_rx_pio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		tegra_uart_dma_channel_free(tup, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) fail_rx_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (!tup->use_tx_pio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		tegra_uart_dma_channel_free(tup, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)  * Flush any TX data submitted for DMA and PIO. Called when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)  * TX circular buffer is reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static void tegra_uart_flush_buffer(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	tup->tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	if (tup->tx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		dmaengine_terminate_all(tup->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static void tegra_uart_shutdown(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	tegra_uart_hw_deinit(tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	free_irq(u->irq, tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static void tegra_uart_enable_ms(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	if (tup->enable_modem_interrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		tup->ier_shadow |= UART_IER_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		tegra_uart_write(tup, tup->ier_shadow, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static void tegra_uart_set_termios(struct uart_port *u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		struct ktermios *termios, struct ktermios *oldtermios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	struct tegra_uart_port *tup = to_tegra_uport(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	unsigned int baud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	unsigned int lcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	int symb_bit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	max_divider *= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	spin_lock_irqsave(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	/* Changing configuration, it is safe to stop any rx now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	if (tup->rts_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		set_rts(tup, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	/* Clear all interrupts as configuration is going to be changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	tegra_uart_read(tup, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	tegra_uart_write(tup, 0, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	tegra_uart_read(tup, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	/* Parity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	lcr = tup->lcr_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	lcr &= ~UART_LCR_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	/* CMSPAR isn't supported by this driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	termios->c_cflag &= ~CMSPAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if ((termios->c_cflag & PARENB) == PARENB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		symb_bit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		if (termios->c_cflag & PARODD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			lcr |= UART_LCR_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 			lcr &= ~UART_LCR_EPAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 			lcr &= ~UART_LCR_SPAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			lcr |= UART_LCR_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			lcr |= UART_LCR_EPAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			lcr &= ~UART_LCR_SPAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	lcr &= ~UART_LCR_WLEN8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	switch (termios->c_cflag & CSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	case CS5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		lcr |= UART_LCR_WLEN5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		symb_bit += 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	case CS6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		lcr |= UART_LCR_WLEN6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		symb_bit += 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	case CS7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		lcr |= UART_LCR_WLEN7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		symb_bit += 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		lcr |= UART_LCR_WLEN8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		symb_bit += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	/* Stop bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	if (termios->c_cflag & CSTOPB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		lcr |= UART_LCR_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		symb_bit += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		lcr &= ~UART_LCR_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		symb_bit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	tegra_uart_write(tup, lcr, UART_LCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	tup->lcr_shadow = lcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	tup->symb_bit = symb_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	/* Baud rate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	baud = uart_get_baud_rate(u, termios, oldtermios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			parent_clk_rate/max_divider,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			parent_clk_rate/16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	spin_unlock_irqrestore(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	ret = tegra_set_baudrate(tup, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		dev_err(tup->uport.dev, "Failed to set baud rate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if (tty_termios_baud_rate(termios))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		tty_termios_encode_baud_rate(termios, baud, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	spin_lock_irqsave(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	/* Flow control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	if (termios->c_cflag & CRTSCTS)	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		/* if top layer has asked to set rts active then do so here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		if (tup->rts_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			set_rts(tup, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	/* update the port timeout based on new settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	uart_update_timeout(u, termios->c_cflag, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	/* Make sure all writes have completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	tegra_uart_read(tup, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	/* Re-enable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	tegra_uart_read(tup, UART_IER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	tup->uport.ignore_status_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	/* Ignore all characters if CREAD is not set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	if ((termios->c_cflag & CREAD) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		tup->uport.ignore_status_mask |= UART_LSR_DR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (termios->c_iflag & IGNBRK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		tup->uport.ignore_status_mask |= UART_LSR_BI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	spin_unlock_irqrestore(&u->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static const char *tegra_uart_type(struct uart_port *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	return TEGRA_UART_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static const struct uart_ops tegra_uart_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	.tx_empty	= tegra_uart_tx_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	.set_mctrl	= tegra_uart_set_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	.get_mctrl	= tegra_uart_get_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	.stop_tx	= tegra_uart_stop_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	.start_tx	= tegra_uart_start_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	.stop_rx	= tegra_uart_stop_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	.flush_buffer	= tegra_uart_flush_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	.enable_ms	= tegra_uart_enable_ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	.break_ctl	= tegra_uart_break_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	.startup	= tegra_uart_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	.shutdown	= tegra_uart_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	.set_termios	= tegra_uart_set_termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	.type		= tegra_uart_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	.request_port	= tegra_uart_request_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	.release_port	= tegra_uart_release_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static struct uart_driver tegra_uart_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	.driver_name	= "tegra_hsuart",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	.dev_name	= "ttyTHS",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	.cons		= NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	.nr		= TEGRA_UART_MAXIMUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static int tegra_uart_parse_dt(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	struct tegra_uart_port *tup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	int port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	u32 pval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	int n_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	port = of_alias_get_id(np, "serial");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if (port < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	tup->uport.line = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	tup->enable_modem_interrupt = of_property_read_bool(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 					"nvidia,enable-modem-interrupt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	index = of_property_match_string(np, "dma-names", "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	if (index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		tup->use_rx_pio = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		dev_info(&pdev->dev, "RX in PIO mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	index = of_property_match_string(np, "dma-names", "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	if (index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		tup->use_tx_pio = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		dev_info(&pdev->dev, "TX in PIO mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if (n_entries > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		tup->n_adjustable_baud_rates = n_entries / 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		tup->baud_tolerance =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			     sizeof(*tup->baud_tolerance), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		if (!tup->baud_tolerance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		for (count = 0, index = 0; count < n_entries; count += 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		     index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			ret =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			of_property_read_u32_index(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 						   "nvidia,adjust-baud-rates",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 						   count, &pval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 				tup->baud_tolerance[index].lower_range_baud =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 				pval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			ret =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			of_property_read_u32_index(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 						   "nvidia,adjust-baud-rates",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 						   count + 1, &pval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 				tup->baud_tolerance[index].upper_range_baud =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 				pval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			ret =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			of_property_read_u32_index(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 						   "nvidia,adjust-baud-rates",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 						   count + 2, &pval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 				tup->baud_tolerance[index].tolerance =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 				(s32)pval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		tup->n_adjustable_baud_rates = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static struct tegra_uart_chip_data tegra20_uart_chip_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	.tx_fifo_full_status		= false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	.allow_txfifo_reset_fifo_mode	= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	.support_clk_src_div		= false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	.fifo_mode_enable_status	= false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	.uart_max_port			= 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	.max_dma_burst_bytes		= 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	.error_tolerance_low_range	= -4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	.error_tolerance_high_range	= 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static struct tegra_uart_chip_data tegra30_uart_chip_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	.tx_fifo_full_status		= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	.allow_txfifo_reset_fifo_mode	= false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	.support_clk_src_div		= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	.fifo_mode_enable_status	= false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	.uart_max_port			= 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	.max_dma_burst_bytes		= 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	.error_tolerance_low_range	= -4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	.error_tolerance_high_range	= 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static struct tegra_uart_chip_data tegra186_uart_chip_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	.tx_fifo_full_status		= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	.allow_txfifo_reset_fifo_mode	= false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	.support_clk_src_div		= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	.fifo_mode_enable_status	= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	.uart_max_port			= 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	.max_dma_burst_bytes		= 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	.error_tolerance_low_range	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	.error_tolerance_high_range	= 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static struct tegra_uart_chip_data tegra194_uart_chip_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	.tx_fifo_full_status		= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	.allow_txfifo_reset_fifo_mode	= false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	.support_clk_src_div		= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	.fifo_mode_enable_status	= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	.uart_max_port			= 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	.max_dma_burst_bytes		= 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	.error_tolerance_low_range	= -2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	.error_tolerance_high_range	= 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static const struct of_device_id tegra_uart_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		.compatible	= "nvidia,tegra30-hsuart",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		.data		= &tegra30_uart_chip_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	}, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		.compatible	= "nvidia,tegra20-hsuart",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		.data		= &tegra20_uart_chip_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	}, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		.compatible     = "nvidia,tegra186-hsuart",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		.data		= &tegra186_uart_chip_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	}, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		.compatible     = "nvidia,tegra194-hsuart",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		.data		= &tegra194_uart_chip_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	}, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static int tegra_uart_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	struct tegra_uart_port *tup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	struct uart_port *u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	struct resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	const struct tegra_uart_chip_data *cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	match = of_match_device(tegra_uart_of_match, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	if (!match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		dev_err(&pdev->dev, "Error: No device match found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	cdata = match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	if (!tup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	ret = tegra_uart_parse_dt(pdev, tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	u = &tup->uport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	u->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	u->ops = &tegra_uart_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	u->type = PORT_TEGRA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	u->fifosize = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	tup->cdata = cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	platform_set_drvdata(pdev, tup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	if (!resource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		dev_err(&pdev->dev, "No IO memory resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	u->mapbase = resource->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	u->membase = devm_ioremap_resource(&pdev->dev, resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (IS_ERR(u->membase))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		return PTR_ERR(u->membase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	if (IS_ERR(tup->uart_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		dev_err(&pdev->dev, "Couldn't get the clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		return PTR_ERR(tup->uart_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	if (IS_ERR(tup->rst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		dev_err(&pdev->dev, "Couldn't get the reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		return PTR_ERR(tup->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	u->iotype = UPIO_MEM32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	ret = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	u->irq = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	u->regshift = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	ret = uart_add_one_port(&tegra_uart_driver, u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static int tegra_uart_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	struct tegra_uart_port *tup = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	struct uart_port *u = &tup->uport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	uart_remove_one_port(&tegra_uart_driver, u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static int tegra_uart_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	struct tegra_uart_port *tup = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	struct uart_port *u = &tup->uport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	return uart_suspend_port(&tegra_uart_driver, u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static int tegra_uart_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	struct tegra_uart_port *tup = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	struct uart_port *u = &tup->uport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	return uart_resume_port(&tegra_uart_driver, u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) static const struct dev_pm_ops tegra_uart_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) static struct platform_driver tegra_uart_platform_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	.probe		= tegra_uart_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	.remove		= tegra_uart_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		.name	= "serial-tegra",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		.of_match_table = tegra_uart_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		.pm	= &tegra_uart_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static int __init tegra_uart_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	struct device_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	const struct of_device_id *match = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	const struct tegra_uart_chip_data *cdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	node = of_find_matching_node(NULL, tegra_uart_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	if (node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		match = of_match_node(tegra_uart_of_match, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	if (match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		cdata = match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	if (cdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		tegra_uart_driver.nr = cdata->uart_max_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	ret = uart_register_driver(&tegra_uart_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		pr_err("Could not register %s driver\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		       tegra_uart_driver.driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	ret = platform_driver_register(&tegra_uart_platform_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		pr_err("Uart platform driver register failed, e = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		uart_unregister_driver(&tegra_uart_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static void __exit tegra_uart_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	pr_info("Unloading tegra uart driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	platform_driver_unregister(&tegra_uart_platform_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	uart_unregister_driver(&tegra_uart_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) module_init(tegra_uart_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) module_exit(tegra_uart_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) MODULE_ALIAS("platform:serial-tegra");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) MODULE_LICENSE("GPL v2");