Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Driver for CSR SiRFprimaII onboard UARTs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/sysrq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/console.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/tty.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/tty_flip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/serial_core.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/serial.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/of_gpio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/dma-direction.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <asm/mach/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include "sirfsoc_uart.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
/* Forward declarations for the PIO TX/RX helpers defined later in this file. */
static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
static struct uart_driver sirfsoc_uart_drv;

/* Completion callback attached to each TX DMA descriptor (defined below). */
static void sirfsoc_uart_tx_dma_complete_callback(void *param);
/*
 * Lookup table mapping standard baud rates to the controller's baud-rate
 * register values, sorted from highest to lowest rate.
 * NOTE(review): the register values are opaque magic numbers presumably
 * derived from the UART clock configuration — verify against the SoC
 * datasheet before modifying.
 */
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
	{4000000, 2359296},
	{3500000, 1310721},
	{3000000, 1572865},
	{2500000, 1245186},
	{2000000, 1572866},
	{1500000, 1245188},
	{1152000, 1638404},
	{1000000, 1572869},
	{921600, 1114120},
	{576000, 1245196},
	{500000, 1245198},
	{460800, 1572876},
	{230400, 1310750},
	{115200, 1310781},
	{57600, 1310843},
	{38400, 1114328},
	{19200, 1114545},
	{9600, 1114979},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) static struct sirfsoc_uart_port *sirf_ports[SIRFSOC_UART_NR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
/* Recover the driver-private port structure embedding a struct uart_port. */
static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
{
	return container_of(port, struct sirfsoc_uart_port, port);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	unsigned long reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	return (reg & ufifo_st->ff_empty(port)) ? TIOCSER_TEMT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 	if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 		goto cts_asserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 		if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 						SIRFUART_AFC_CTS_STATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 			goto cts_asserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 			goto cts_deasserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 		if (!gpio_get_value(sirfport->cts_gpio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 			goto cts_asserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 			goto cts_deasserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) cts_deasserted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	return TIOCM_CAR | TIOCM_DSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) cts_asserted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	unsigned int assert = mctrl & TIOCM_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	unsigned int current_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	if (mctrl & TIOCM_LOOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		if (sirfport->uart_reg->uart_type == SIRF_REAL_UART)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 			wr_regl(port, ureg->sirfsoc_line_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 				rd_regl(port, ureg->sirfsoc_line_ctrl) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 				SIRFUART_LOOP_BACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 			wr_regl(port, ureg->sirfsoc_mode1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 				rd_regl(port, ureg->sirfsoc_mode1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 				SIRFSOC_USP_LOOP_BACK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 		if (sirfport->uart_reg->uart_type == SIRF_REAL_UART)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 			wr_regl(port, ureg->sirfsoc_line_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 				rd_regl(port, ureg->sirfsoc_line_ctrl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 				~SIRFUART_LOOP_BACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 			wr_regl(port, ureg->sirfsoc_mode1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 				rd_regl(port, ureg->sirfsoc_mode1) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 				~SIRFSOC_USP_LOOP_BACK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 		current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 		val |= current_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 		wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 		if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 			gpio_set_value(sirfport->rts_gpio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 			gpio_set_value(sirfport->rts_gpio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
/*
 * uart_ops .stop_tx.  With a TX DMA channel, a running transfer is
 * paused (resumable from sirfsoc_uart_tx_with_dma); otherwise the
 * TX-FIFO-empty interrupt is masked.  On atlas7 interrupts are disabled
 * via a dedicated clear register; on older SoCs by read-modify-write of
 * the shared enable register.
 */
static void sirfsoc_uart_stop_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (sirfport->tx_dma_chan) {
		if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
			/* Pause, don't terminate: start_tx may resume it. */
			dmaengine_pause(sirfport->tx_dma_chan);
			sirfport->tx_dma_state = TX_DMA_PAUSE;
		} else {
			if (!sirfport->is_atlas7)
				wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~uint_en->sirfsoc_txfifo_empty_en);
			else
				wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		}
	} else {
		/* USP ports additionally gate the transmitter itself. */
		if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
			wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
				ureg->sirfsoc_tx_rx_en) & ~SIRFUART_TX_EN);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				uint_en->sirfsoc_txfifo_empty_en);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 
/*
 * Kick a TX transfer over DMA.  Resumes a paused descriptor if one
 * exists; otherwise masks the TX-FIFO-empty interrupt and either falls
 * back to PIO (for unaligned or <4-byte data) or maps the contiguous
 * part of the circular buffer and submits a DMA descriptor.  Completion
 * is reported through sirfsoc_uart_tx_dma_complete_callback.
 */
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long tran_size;
	unsigned long tran_start;
	unsigned long pio_tx_size;

	/* Only the contiguous span up to the buffer end can be DMA-mapped. */
	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	tran_start = (unsigned long)(xmit->buf + xmit->tail);
	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
			!tran_size)
		return;
	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
		dmaengine_resume(sirfport->tx_dma_chan);
		return;
	}
	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
		return;
	/* Mask TX-FIFO-empty IRQ: DMA completion drives progress instead. */
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(uint_en->sirfsoc_txfifo_empty_en));
	else
		wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				uint_en->sirfsoc_txfifo_empty_en);
	/*
	 * DMA requires buffer address and buffer length are both aligned with
	 * 4 bytes, so we use PIO for
	 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
	 * bytes, and move to DMA for the left part aligned with 4bytes
	 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
	 * part first, move to PIO for the left 1~3 bytes
	 */
	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
		/* PIO path: switch the TX FIFO into I/O mode while stopped. */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
			SIRFUART_IO_MODE);
		if (BYTES_TO_ALIGN(tran_start)) {
			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
				BYTES_TO_ALIGN(tran_start));
			tran_size -= pio_tx_size;
		}
		if (tran_size < 4)
			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
		/* Re-enable the TX-FIFO-empty IRQ to drain the rest by PIO. */
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
	} else {
		/* tx transfer mode switch into dma mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
			~SIRFUART_IO_MODE);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		/* Round down to a whole number of 4-byte words for DMA. */
		tran_size &= ~(0x3);

		sirfport->tx_dma_addr = dma_map_single(port->dev,
			xmit->buf + xmit->tail,
			tran_size, DMA_TO_DEVICE);
		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!sirfport->tx_dma_desc) {
			dev_err(port->dev, "DMA prep slave single fail\n");
			return;
		}
		sirfport->tx_dma_desc->callback =
			sirfsoc_uart_tx_dma_complete_callback;
		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
		sirfport->transfer_size = tran_size;

		dmaengine_submit(sirfport->tx_dma_desc);
		dma_async_issue_pending(sirfport->tx_dma_chan);
		sirfport->tx_dma_state = TX_DMA_RUNNING;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
/*
 * uart_ops .start_tx.  DMA-capable ports hand off to
 * sirfsoc_uart_tx_with_dma(); otherwise prime the TX FIFO by PIO (while
 * it is stopped), restart it, and unmask the TX-FIFO-empty interrupt so
 * the ISR keeps refilling.
 */
static void sirfsoc_uart_start_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	if (sirfport->tx_dma_chan)
		sirfsoc_uart_tx_with_dma(sirfport);
	else {
		/* USP ports must also enable the transmitter itself. */
		if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
			wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
				ureg->sirfsoc_tx_rx_en) | SIRFUART_TX_EN);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		sirfsoc_uart_pio_tx_chars(sirfport, port->fifosize);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)|
					uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_txfifo_empty_en);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
/*
 * uart_ops .stop_rx.  Stops the RX FIFO, masks the RX interrupt set in
 * use (DMA-related interrupts plus rx-done when a DMA channel exists,
 * plain I/O-mode interrupts otherwise) and, for DMA, terminates any
 * in-flight transfer.
 */
static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	/* Writing 0 to the FIFO op register halts the RX FIFO. */
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	if (sirfport->rx_dma_chan) {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type) |
				uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				SIRFUART_RX_DMA_INT_EN(uint_en,
				sirfport->uart_reg->uart_type)|
				uint_en->sirfsoc_rx_done_en);
		dmaengine_terminate_all(sirfport->rx_dma_chan);
	} else {
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(SIRFUART_RX_IO_INT_EN(uint_en,
				sirfport->uart_reg->uart_type)));
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
				SIRFUART_RX_IO_INT_EN(uint_en,
				sirfport->uart_reg->uart_type));
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 
/*
 * Disable modem-status (CTS) change reporting.  On the real UART this
 * clears the AFC control bits and masks the CTS interrupt; on USP ports
 * it disables the CTS GPIO interrupt instead.  No-op without hardware
 * flow control.
 */
static void sirfsoc_uart_disable_ms(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (!sirfport->hw_flow_ctrl)
		return;
	sirfport->ms_enabled = false;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		/* Clear the low AFC bits (flow-control enables/threshold). */
		wr_regl(port, ureg->sirfsoc_afc_ctrl,
				rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)&
					~uint_en->sirfsoc_cts_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_cts_en);
	} else
		disable_irq(gpio_to_irq(sirfport->cts_gpio));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	struct uart_port *port = &sirfport->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 		uart_handle_cts_change(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 				!gpio_get_value(sirfport->cts_gpio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
/*
 * uart_ops .enable_ms: enable modem-status (CTS) change reporting.
 * Real UART: turn on automatic flow control and unmask the CTS
 * interrupt; USP ports: enable the CTS GPIO interrupt.  No-op without
 * hardware flow control.  Mirrors sirfsoc_uart_disable_ms().
 */
static void sirfsoc_uart_enable_ms(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (!sirfport->hw_flow_ctrl)
		return;
	sirfport->ms_enabled = true;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		wr_regl(port, ureg->sirfsoc_afc_ctrl,
				rd_regl(port, ureg->sirfsoc_afc_ctrl) |
				SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN |
				SIRFUART_AFC_CTRL_RX_THD);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					| uint_en->sirfsoc_cts_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_cts_en);
	} else
		enable_irq(gpio_to_irq(sirfport->cts_gpio));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 		unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 		if (break_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 			ulcon |= SIRFUART_SET_BREAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 			ulcon &= ~SIRFUART_SET_BREAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	unsigned int ch, rx_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	struct tty_struct *tty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	tty = tty_port_tty_get(&port->state->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	if (!tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 					ufifo_st->ff_empty(port))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 			SIRFUART_DUMMY_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 		if (unlikely(uart_handle_sysrq_char(port, ch)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 		rx_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 		if (rx_count >= max_rx_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	port->icount.rx += rx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	return rx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	struct uart_port *port = &sirfport->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	struct circ_buf *xmit = &port->state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	unsigned int num_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	while (!uart_circ_empty(xmit) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		!(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 					ufifo_st->ff_full(port)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		count--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		wr_regl(port, ureg->sirfsoc_tx_fifo_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 				xmit->buf[xmit->tail]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		port->icount.tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		num_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		uart_write_wakeup(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	return num_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) static void sirfsoc_uart_tx_dma_complete_callback(void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	struct uart_port *port = &sirfport->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	struct circ_buf *xmit = &port->state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	xmit->tail = (xmit->tail + sirfport->transfer_size) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 				(UART_XMIT_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	port->icount.tx += sirfport->transfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		uart_write_wakeup(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	if (sirfport->tx_dma_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		dma_unmap_single(port->dev, sirfport->tx_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 				sirfport->transfer_size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	sirfport->tx_dma_state = TX_DMA_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	sirfsoc_uart_tx_with_dma(sirfport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	unsigned long intr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	unsigned long cts_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	unsigned long flag = TTY_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	struct uart_port *port = &sirfport->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	struct uart_state *state = port->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	struct circ_buf *xmit = &port->state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(uint_st,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 				sirfport->uart_reg->uart_type)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		if (intr_status & uint_st->sirfsoc_rxd_brk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 			port->icount.brk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 			if (uart_handle_break(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 				goto recv_char;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		if (intr_status & uint_st->sirfsoc_rx_oflow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 			port->icount.overrun++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 			flag = TTY_OVERRUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		if (intr_status & uint_st->sirfsoc_frm_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 			port->icount.frame++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 			flag = TTY_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		if (intr_status & uint_st->sirfsoc_parity_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 			port->icount.parity++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 			flag = TTY_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		intr_status &= port->read_status_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		uart_insert_char(port, intr_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 					uint_en->sirfsoc_rx_oflow_en, 0, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) recv_char:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 			(intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 			!sirfport->tx_dma_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 					SIRFUART_AFC_CTS_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		if (cts_status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 			cts_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 			cts_status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		uart_handle_cts_change(port, cts_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		wake_up_interruptible(&state->port.delta_msr_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	if (!sirfport->rx_dma_chan &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		(intr_status & SIRFUART_RX_IO_INT_ST(uint_st))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		 * chip will trigger continuous RX_TIMEOUT interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		 * in RXFIFO empty and not trigger if RXFIFO recevice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		 * data in limit time, original method use RX_TIMEOUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		 * will trigger lots of useless interrupt in RXFIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		 * empty.RXFIFO received one byte will trigger RX_DONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		 * interrupt.use RX_DONE to wait for data received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		 * into RXFIFO, use RX_THD/RX_FULL for lots data receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		 * and use RX_TIMEOUT for the last left data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		if (intr_status & uint_st->sirfsoc_rx_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 			if (!sirfport->is_atlas7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 				wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 					rd_regl(port, ureg->sirfsoc_int_en_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 					& ~(uint_en->sirfsoc_rx_done_en));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 				wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 				rd_regl(port, ureg->sirfsoc_int_en_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 				| (uint_en->sirfsoc_rx_timeout_en));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 				wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 					uint_en->sirfsoc_rx_done_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 				wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 					uint_en->sirfsoc_rx_timeout_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 			if (intr_status & uint_st->sirfsoc_rx_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 				if (!sirfport->is_atlas7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 					wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 					rd_regl(port, ureg->sirfsoc_int_en_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 					& ~(uint_en->sirfsoc_rx_timeout_en));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 					wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 					rd_regl(port, ureg->sirfsoc_int_en_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 					| (uint_en->sirfsoc_rx_done_en));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 					wr_regl(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 						ureg->sirfsoc_int_en_clr_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 						uint_en->sirfsoc_rx_timeout_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 					wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 						uint_en->sirfsoc_rx_done_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 			sirfsoc_uart_pio_rx_chars(port, port->fifosize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	tty_flip_buffer_push(&state->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		if (sirfport->tx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 			sirfsoc_uart_tx_with_dma(sirfport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 			if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 				spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 				return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 				sirfsoc_uart_pio_tx_chars(sirfport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 						port->fifosize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 				if ((uart_circ_empty(xmit)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 				(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 				ufifo_st->ff_empty(port)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 					sirfsoc_uart_stop_tx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) static void sirfsoc_uart_rx_dma_complete_callback(void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) /* submit rx dma task into dmaengine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		~SIRFUART_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	sirfport->rx_dma_items.xmit.tail =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		sirfport->rx_dma_items.xmit.head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	sirfport->rx_dma_items.desc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		dmaengine_prep_dma_cyclic(sirfport->rx_dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		sirfport->rx_dma_items.dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		SIRFSOC_RX_DMA_BUF_SIZE / 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	if (IS_ERR_OR_NULL(sirfport->rx_dma_items.desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		dev_err(port->dev, "DMA slave single fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	sirfport->rx_dma_items.desc->callback =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		sirfsoc_uart_rx_dma_complete_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	sirfport->rx_dma_items.desc->callback_param = sirfport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	sirfport->rx_dma_items.cookie =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		dmaengine_submit(sirfport->rx_dma_items.desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	dma_async_issue_pending(sirfport->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	if (!sirfport->is_atlas7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 				rd_regl(port, ureg->sirfsoc_int_en_reg) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 				SIRFUART_RX_DMA_INT_EN(uint_en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 				sirfport->uart_reg->uart_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 				SIRFUART_RX_DMA_INT_EN(uint_en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 				sirfport->uart_reg->uart_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) sirfsoc_usp_calc_sample_div(unsigned long set_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		unsigned long ioclk_rate, unsigned long *sample_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	unsigned long min_delta = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	unsigned short sample_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	unsigned long ioclk_div = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	unsigned long temp_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	for (sample_div = SIRF_USP_MIN_SAMPLE_DIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 			sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		temp_delta = ioclk_rate -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		(ioclk_rate + (set_rate * sample_div) / 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		/ (set_rate * sample_div) * set_rate * sample_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		if (temp_delta < min_delta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 			ioclk_div = (2 * ioclk_rate /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 				(set_rate * sample_div) + 1) / 2 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 			if (ioclk_div > SIRF_IOCLK_DIV_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 			min_delta = temp_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			*sample_reg = sample_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			if (!temp_delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	return ioclk_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			unsigned long ioclk_rate, unsigned long *set_baud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	unsigned long min_delta = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	unsigned short sample_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	unsigned int regv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	unsigned long ioclk_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	unsigned long baud_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	int temp_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	for (sample_div = SIRF_MIN_SAMPLE_DIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 			sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		if (ioclk_div > SIRF_IOCLK_DIV_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		temp_delta = baud_tmp - baud_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		if (temp_delta < min_delta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			regv = regv & (~SIRF_IOCLK_DIV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			regv = regv | ioclk_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			regv = regv & (~SIRF_SAMPLE_DIV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 			regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 			min_delta = temp_delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 			*set_baud = baud_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	return regv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) static void sirfsoc_uart_set_termios(struct uart_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 				       struct ktermios *termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 				       struct ktermios *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	unsigned long	config_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	unsigned long	baud_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	unsigned long	set_baud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	unsigned long	flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	unsigned long	ic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	unsigned int	clk_div_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	unsigned long	txfifo_op_reg, ioclk_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	unsigned long	rx_time_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	int		threshold_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	u32		data_bit_len, stop_bit_len, len_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	unsigned long	sample_div_reg = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	ioclk_rate	= port->uartclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	switch (termios->c_cflag & CSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	case CS8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		data_bit_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		config_reg |= SIRFUART_DATA_BIT_LEN_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	case CS7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		data_bit_len = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		config_reg |= SIRFUART_DATA_BIT_LEN_7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	case CS6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		data_bit_len = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		config_reg |= SIRFUART_DATA_BIT_LEN_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	case CS5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		data_bit_len = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		config_reg |= SIRFUART_DATA_BIT_LEN_5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (termios->c_cflag & CSTOPB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		config_reg |= SIRFUART_STOP_BIT_LEN_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		stop_bit_len = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		stop_bit_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	port->ignore_status_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		if (termios->c_iflag & INPCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 			port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 				uint_en->sirfsoc_parity_err_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		if (termios->c_iflag & INPCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 			port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 			port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			port->ignore_status_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 				uint_en->sirfsoc_frm_err_en |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 				uint_en->sirfsoc_parity_err_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		if (termios->c_cflag & PARENB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 			if (termios->c_cflag & CMSPAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 				if (termios->c_cflag & PARODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 					config_reg |= SIRFUART_STICK_BIT_MARK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 					config_reg |= SIRFUART_STICK_BIT_SPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 				if (termios->c_cflag & PARODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 					config_reg |= SIRFUART_STICK_BIT_ODD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 					config_reg |= SIRFUART_STICK_BIT_EVEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			port->ignore_status_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 				uint_en->sirfsoc_frm_err_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		if (termios->c_cflag & PARENB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			dev_warn(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 					"USP-UART not support parity err\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (termios->c_iflag & IGNBRK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		port->ignore_status_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			uint_en->sirfsoc_rxd_brk_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			port->ignore_status_mask |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 				uint_en->sirfsoc_rx_oflow_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	if ((termios->c_cflag & CREAD) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		port->ignore_status_mask |= SIRFUART_DUMMY_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	/* Hardware Flow Control Settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	if (UART_ENABLE_MS(port, termios->c_cflag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		if (!sirfport->ms_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			sirfsoc_uart_enable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		if (sirfport->ms_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			sirfsoc_uart_disable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (ioclk_rate == 150000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			if (baud_rate == baudrate_to_regv[ic].baud_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 				clk_div_reg = baudrate_to_regv[ic].reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	set_baud = baud_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		if (unlikely(clk_div_reg == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 					ioclk_rate, &set_baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 				ioclk_rate, &sample_div_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		sample_div_reg--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 				(sample_div_reg + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		/* setting usp mode 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 				(1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 				<< SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		wr_regl(port, ureg->sirfsoc_mode2, len_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (tty_termios_baud_rate(termios))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		tty_termios_encode_baud_rate(termios, set_baud, set_baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	/* set receive timeout && data bits len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	wr_regl(port, ureg->sirfsoc_tx_fifo_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			(txfifo_op_reg & ~SIRFUART_FIFO_START));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		config_reg |= SIRFUART_UART_RECV_TIMEOUT(rx_time_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		/*tx frame ctrl*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 				SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		len_val |= ((data_bit_len - 1) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 				SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		len_val |= (((clk_div_reg & 0xc00) >> 10) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 				SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		/*rx frame ctrl*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		len_val |= (data_bit_len - 1) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		len_val |= (((clk_div_reg & 0xf000) >> 12) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 				SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		/*async param*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		wr_regl(port, ureg->sirfsoc_async_param_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			(SIRFUART_USP_RECV_TIMEOUT(rx_time_out)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			(sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			SIRFSOC_USP_ASYNC_DIV2_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	if (sirfport->tx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (sirfport->rx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			~SIRFUART_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			SIRFUART_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	sirfport->rx_period_time = 20000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (set_baud < 1000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		threshold_div = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		threshold_div = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 				SIRFUART_FIFO_THD(port) / threshold_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				SIRFUART_FIFO_THD(port) / threshold_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	txfifo_op_reg |= SIRFUART_FIFO_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	uart_update_timeout(port, termios->c_cflag, set_baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			      unsigned int oldstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	if (!state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		clk_prepare_enable(sirfport->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		clk_disable_unprepare(sirfport->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) static int sirfsoc_uart_startup(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	struct sirfsoc_uart_port *sirfport	= to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	unsigned int index			= port->line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	irq_modify_status(port->irq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	ret = request_irq(port->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				sirfsoc_uart_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 				0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 				SIRFUART_PORT_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 				sirfport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 							index, port->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		goto irq_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	/* initial hardware settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		SIRFUART_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		SIRFUART_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		~SIRFUART_RX_DMA_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		wr_regl(port, ureg->sirfsoc_mode1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 			SIRFSOC_USP_ENDIAN_CTRL_LSBF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			SIRFSOC_USP_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (sirfport->rx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			SIRFUART_RX_FIFO_CHK_SC(port->line, 0x1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			SIRFUART_RX_FIFO_CHK_LC(port->line, 0x2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			SIRFUART_RX_FIFO_CHK_HC(port->line, 0x4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	if (sirfport->tx_dma_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		sirfport->tx_dma_state = TX_DMA_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 				SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 				SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 				SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	sirfport->ms_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		sirfport->hw_flow_ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		irq_modify_status(gpio_to_irq(sirfport->cts_gpio),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 			IRQ_NOREQUEST, IRQ_NOAUTOEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			dev_err(port->dev, "UART-USP:request gpio irq fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			goto init_rx_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		sirfport->rx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		wr_regl(port, ureg->sirfsoc_swh_dma_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			SIRFUART_CLEAR_RX_ADDR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			sirfport->rx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			SIRFSOC_USP_FRADDR_CLR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		sirfport->is_hrt_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		sirfport->rx_period_time = 20000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		sirfport->rx_last_pos = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		sirfport->pio_fetch_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		sirfport->rx_dma_items.xmit.tail =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			sirfport->rx_dma_items.xmit.head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		hrtimer_start(&sirfport->hrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			ns_to_ktime(sirfport->rx_period_time),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (sirfport->rx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		sirfsoc_uart_start_next_rx_dma(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		if (!sirfport->is_atlas7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 				rd_regl(port, ureg->sirfsoc_int_en_reg) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				SIRFUART_RX_IO_INT_EN(uint_en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 					sirfport->uart_reg->uart_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			wr_regl(port, ureg->sirfsoc_int_en_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 				SIRFUART_RX_IO_INT_EN(uint_en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 					sirfport->uart_reg->uart_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	enable_irq(port->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) init_rx_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	free_irq(port->irq, sirfport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) irq_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static void sirfsoc_uart_shutdown(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	struct circ_buf *xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	xmit = &sirfport->rx_dma_items.xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (!sirfport->is_atlas7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		wr_regl(port, ureg->sirfsoc_int_en_clr_reg, ~0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	free_irq(port->irq, sirfport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	if (sirfport->ms_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		sirfsoc_uart_disable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			sirfport->hw_flow_ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		gpio_set_value(sirfport->rts_gpio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (sirfport->tx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		sirfport->tx_dma_state = TX_DMA_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		while (((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			!CIRC_CNT(xmit->head, xmit->tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			SIRFSOC_RX_DMA_BUF_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		sirfport->is_hrt_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		hrtimer_cancel(&sirfport->hrt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static const char *sirfsoc_uart_type(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static int sirfsoc_uart_request_port(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	ret = request_mem_region(port->mapbase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		SIRFUART_MAP_SIZE, uart_param->port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	return ret ? 0 : -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static void sirfsoc_uart_release_port(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (flags & UART_CONFIG_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		port->type = SIRFSOC_PORT_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		sirfsoc_uart_request_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static const struct uart_ops sirfsoc_uart_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	.tx_empty	= sirfsoc_uart_tx_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	.get_mctrl	= sirfsoc_uart_get_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	.set_mctrl	= sirfsoc_uart_set_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	.stop_tx	= sirfsoc_uart_stop_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	.start_tx	= sirfsoc_uart_start_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	.stop_rx	= sirfsoc_uart_stop_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	.enable_ms	= sirfsoc_uart_enable_ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	.break_ctl	= sirfsoc_uart_break_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	.startup	= sirfsoc_uart_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	.shutdown	= sirfsoc_uart_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	.set_termios	= sirfsoc_uart_set_termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	.pm		= sirfsoc_uart_pm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	.type		= sirfsoc_uart_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	.release_port	= sirfsoc_uart_release_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	.request_port	= sirfsoc_uart_request_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	.config_port	= sirfsoc_uart_config_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) sirfsoc_uart_console_setup(struct console *co, char *options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	unsigned int baud = 115200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	unsigned int bits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	unsigned int parity = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	unsigned int flow = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	struct sirfsoc_uart_port *sirfport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	struct sirfsoc_register *ureg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		co->index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	sirfport = sirf_ports[co->index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (!sirfport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (!sirfport->port.mapbase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	/* enable usp in mode1 register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		wr_regl(&sirfport->port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				SIRFSOC_USP_ENDIAN_CTRL_LSBF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		uart_parse_options(options, &baud, &parity, &bits, &flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	sirfport->port.cons = co;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	/* default console tx/rx transfer using io mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	sirfport->rx_dma_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	sirfport->tx_dma_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	return uart_set_options(&sirfport->port, co, baud, parity, bits, flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	while (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		ufifo_st->ff_full(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	wr_regl(port, ureg->sirfsoc_tx_fifo_data, ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static void sirfsoc_uart_console_write(struct console *co, const char *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 							unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	struct sirfsoc_uart_port *sirfport = sirf_ports[co->index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	uart_console_write(&sirfport->port, s, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			sirfsoc_uart_console_putchar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static struct console sirfsoc_uart_console = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	.name		= SIRFSOC_UART_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	.device		= uart_console_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	.flags		= CON_PRINTBUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	.index		= -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	.write		= sirfsoc_uart_console_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	.setup		= sirfsoc_uart_console_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	.data           = &sirfsoc_uart_drv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int __init sirfsoc_uart_console_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	register_console(&sirfsoc_uart_console);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) console_initcall(sirfsoc_uart_console_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) static struct uart_driver sirfsoc_uart_drv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	.driver_name	= SIRFUART_PORT_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	.nr		= SIRFSOC_UART_NR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	.dev_name	= SIRFSOC_UART_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	.major		= SIRFSOC_UART_MAJOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	.minor		= SIRFSOC_UART_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	.cons			= &sirfsoc_uart_console,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	.cons			= NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static enum hrtimer_restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer *hrt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	struct sirfsoc_uart_port *sirfport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	struct uart_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	int count, inserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	struct dma_tx_state tx_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	struct tty_struct *tty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	struct sirfsoc_register *ureg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	struct circ_buf *xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	struct sirfsoc_fifo_status *ufifo_st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	int max_pio_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	port = &sirfport->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	inserted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	tty = port->state->port.tty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	ureg = &sirfport->uart_reg->uart_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	xmit = &sirfport->rx_dma_items.xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	ufifo_st = &sirfport->uart_reg->fifo_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	dmaengine_tx_status(sirfport->rx_dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			sirfport->rx_dma_items.cookie, &tx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	if (SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		sirfport->rx_last_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		sirfport->rx_last_pos = xmit->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		sirfport->pio_fetch_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			SIRFSOC_RX_DMA_BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	while (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		inserted = tty_insert_flip_string(tty->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			(const unsigned char *)&xmit->buf[xmit->tail], count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		if (!inserted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			goto next_hrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		port->icount.rx += inserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		xmit->tail = (xmit->tail + inserted) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 				(SIRFSOC_RX_DMA_BUF_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 				SIRFSOC_RX_DMA_BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		tty_flip_buffer_push(tty->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	 * if RX DMA buffer data have all push into tty buffer, and there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	 * only little data(less than a dma transfer unit) left in rxfifo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	 * fetch it out in pio mode and switch back to dma immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if (!inserted && !count &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		dmaengine_pause(sirfport->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		/* switch to pio mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 			SIRFUART_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		 * UART controller SWH_DMA_IO register have CLEAR_RX_ADDR_EN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		 * When found changing I/O to DMA mode, it clears
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		 * two low bits of read point;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		 * USP have similar FRADDR_CLR_EN bit in USP_RX_DMA_IO_CTRL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		 * Fetch data out from rxfifo into DMA buffer in PIO mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		 * while switch back to DMA mode, the data fetched will override
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		 * by DMA, as hardware have a strange behaviour:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		 * after switch back to DMA mode, check rxfifo status it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		 * be the number PIO fetched, so record the fetched data count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		 * to avoid the repeated fetch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		max_pio_cnt = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 			ufifo_st->ff_empty(port)) && max_pio_cnt--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 			xmit->buf[xmit->head] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 				rd_regl(port, ureg->sirfsoc_rx_fifo_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 			xmit->head = (xmit->head + 1) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 					(SIRFSOC_RX_DMA_BUF_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 			sirfport->pio_fetch_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		/* switch back to dma mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			~SIRFUART_IO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		dmaengine_resume(sirfport->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) next_hrt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	return HRTIMER_RESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static const struct of_device_id sirfsoc_uart_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	{ .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	{ .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	{ .compatible = "sirf,atlas7-usp-uart", .data = &sirfsoc_usp},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static int sirfsoc_uart_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct sirfsoc_uart_port *sirfport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	struct uart_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	struct dma_slave_config slv_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		.src_maxburst = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	struct dma_slave_config tx_slv_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		.dst_maxburst = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	match = of_match_node(sirfsoc_uart_ids, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	if (!sirfport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	sirfport->port.line = of_alias_get_id(np, "serial");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	if (sirfport->port.line >= ARRAY_SIZE(sirf_ports)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		dev_err(&pdev->dev, "serial%d out of range\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			sirfport->port.line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	sirf_ports[sirfport->port.line] = sirfport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	sirfport->port.iotype = UPIO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	sirfport->port.flags = UPF_BOOT_AUTOCONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	port = &sirfport->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	port->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	port->private_data = sirfport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	sirfport->hw_flow_ctrl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		of_property_read_bool(np, "uart-has-rtscts") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		of_property_read_bool(np, "sirf,uart-has-rtscts") /* deprecated */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (of_device_is_compatible(np, "sirf,prima2-uart") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		of_device_is_compatible(np, "sirf,atlas7-uart"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		sirfport->uart_reg->uart_type = SIRF_REAL_UART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	if (of_device_is_compatible(np, "sirf,prima2-usp-uart") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	    of_device_is_compatible(np, "sirf,atlas7-usp-uart")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		sirfport->uart_reg->uart_type =	SIRF_USP_UART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		if (!sirfport->hw_flow_ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			goto usp_no_flow_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		if (of_find_property(np, "cts-gpios", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			sirfport->cts_gpio =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 				of_get_named_gpio(np, "cts-gpios", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			sirfport->cts_gpio = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		if (of_find_property(np, "rts-gpios", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			sirfport->rts_gpio =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 				of_get_named_gpio(np, "rts-gpios", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 			sirfport->rts_gpio = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		if ((!gpio_is_valid(sirfport->cts_gpio) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			 !gpio_is_valid(sirfport->rts_gpio))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 				"Usp flow control must have cts and rts gpio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 				"usp-cts-gpio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			dev_err(&pdev->dev, "Unable request cts gpio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		gpio_direction_input(sirfport->cts_gpio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 				"usp-rts-gpio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			dev_err(&pdev->dev, "Unable request rts gpio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		gpio_direction_output(sirfport->rts_gpio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) usp_no_flow_control:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	if (of_device_is_compatible(np, "sirf,atlas7-uart") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	    of_device_is_compatible(np, "sirf,atlas7-usp-uart"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		sirfport->is_atlas7 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	if (of_property_read_u32(np, "fifosize", &port->fifosize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			"Unable to find fifosize in uart node.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	if (res == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		dev_err(&pdev->dev, "Insufficient resources.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	port->mapbase = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	port->membase = devm_ioremap(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (!port->membase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		dev_err(&pdev->dev, "Cannot remap resource.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	if (res == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		dev_err(&pdev->dev, "Insufficient resources.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	port->irq = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	sirfport->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	if (IS_ERR(sirfport->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		ret = PTR_ERR(sirfport->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	port->uartclk = clk_get_rate(sirfport->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	port->ops = &sirfsoc_uart_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	spin_lock_init(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	platform_set_drvdata(pdev, sirfport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	ret = uart_add_one_port(&sirfsoc_uart_drv, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	sirfport->rx_dma_items.xmit.buf =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		&sirfport->rx_dma_items.dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	if (!sirfport->rx_dma_items.xmit.buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		dev_err(port->dev, "Uart alloc bufa failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		goto alloc_coherent_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	sirfport->rx_dma_items.xmit.head =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		sirfport->rx_dma_items.xmit.tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	if (sirfport->rx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	if (sirfport->tx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	if (sirfport->rx_dma_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		hrtimer_init(&sirfport->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		sirfport->hrt.function = sirfsoc_uart_rx_dma_hrtimer_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		sirfport->is_hrt_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) alloc_coherent_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			sirfport->rx_dma_items.xmit.buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 			sirfport->rx_dma_items.dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	dma_release_channel(sirfport->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static int sirfsoc_uart_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	struct uart_port *port = &sirfport->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	uart_remove_one_port(&sirfsoc_uart_drv, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	if (sirfport->rx_dma_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		dmaengine_terminate_all(sirfport->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		dma_release_channel(sirfport->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 				sirfport->rx_dma_items.xmit.buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 				sirfport->rx_dma_items.dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if (sirfport->tx_dma_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		dmaengine_terminate_all(sirfport->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		dma_release_channel(sirfport->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) sirfsoc_uart_suspend(struct device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	struct uart_port *port = &sirfport->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	uart_suspend_port(&sirfsoc_uart_drv, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static int sirfsoc_uart_resume(struct device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	struct uart_port *port = &sirfport->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	uart_resume_port(&sirfsoc_uart_drv, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static struct platform_driver sirfsoc_uart_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	.probe		= sirfsoc_uart_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	.remove		= sirfsoc_uart_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		.name	= SIRFUART_PORT_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		.of_match_table = sirfsoc_uart_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		.pm	= &sirfsoc_uart_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) static int __init sirfsoc_uart_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	ret = uart_register_driver(&sirfsoc_uart_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	ret = platform_driver_register(&sirfsoc_uart_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		uart_unregister_driver(&sirfsoc_uart_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) module_init(sirfsoc_uart_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static void __exit sirfsoc_uart_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	platform_driver_unregister(&sirfsoc_uart_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	uart_unregister_driver(&sirfsoc_uart_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) module_exit(sirfsoc_uart_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");