Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics SA 2017
 * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
 *	     Gerald Baeza <gerald.baeza@st.com>
 *
 * Inspired by st-asc.c from STMicroelectronics (c)
 */

#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>

#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"

static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);

static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
	return container_of(port, struct stm32_port, port);
}

static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val |= bits;
	writel_relaxed(val, port->membase + reg);
}

static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val &= ~bits;
	writel_relaxed(val, port->membase + reg);
}

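/*
 * The RS485 turnaround delays from struct serial_rs485 are expressed in
 * milliseconds, while the CR1 DEAT/DEDT fields count sample-time units
 * (1/8 of a bit with OVER8 oversampling, 1/16 otherwise).  Hence the
 * delay * baud * {8,16} / 1000 conversion below, clamped to the maximum
 * value the bit-field can hold.
 */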
static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
					 u32 delay_DDE, u32 baud)
{
	u32 rs485_deat_dedt;
	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
	bool over8;

	*cr3 |= USART_CR3_DEM;
	over8 = *cr1 & USART_CR1_OVER8;

	if (over8)
		rs485_deat_dedt = delay_ADE * baud * 8;
	else
		rs485_deat_dedt = delay_ADE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
			   USART_CR1_DEAT_MASK;
	*cr1 |= rs485_deat_dedt;

	if (over8)
		rs485_deat_dedt = delay_DDE * baud * 8;
	else
		rs485_deat_dedt = delay_DDE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
			   USART_CR1_DEDT_MASK;
	*cr1 |= rs485_deat_dedt;
}

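/*
 * RS485 configuration entry point: the UART is disabled while it is
 * reconfigured.  The current baud rate is recovered from BRR (taking
 * OVER8 into account) so the DEAT/DEDT assertion times can be
 * recomputed; DEM enables the hardware driver-enable signal and DEP
 * selects its polarity from the SER_RS485_RTS_ON_SEND flag.
 */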
static int stm32_usart_config_rs485(struct uart_port *port,
				    struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	port->rs485 = *rs485conf;

	rs485conf->flags |= SER_RS485_RX_DURING_TX;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);

		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);
	} else {
		stm32_usart_clr_bits(port, ofs->cr3,
				     USART_CR3_DEM | USART_CR3_DEP);
		stm32_usart_clr_bits(port, ofs->cr1,
				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	return 0;
}

static int stm32_usart_init_rs485(struct uart_port *port,
				  struct platform_device *pdev)
{
	struct serial_rs485 *rs485conf = &port->rs485;

	rs485conf->flags = 0;
	rs485conf->delay_rts_before_send = 0;
	rs485conf->delay_rts_after_send = 0;

	if (!pdev->dev.of_node)
		return -ENODEV;

	return uart_get_rs485_mode(port);
}

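/*
 * Return whether receive data is pending.  On the DMA path (threaded
 * handler) data is pending as long as the transfer is still in progress
 * and its residue has moved since the last check; on the PIO path the
 * RXNE flag in the status register is used instead.
 */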
static int stm32_usart_pending_rx(struct uart_port *port, u32 *sr,
				  int *last_res, bool threaded)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status status;
	struct dma_tx_state state;

	*sr = readl_relaxed(port->membase + ofs->isr);

	if (threaded && stm32_port->rx_ch) {
		status = dmaengine_tx_status(stm32_port->rx_ch,
					     stm32_port->rx_ch->cookie,
					     &state);
		if (status == DMA_IN_PROGRESS && (*last_res != state.residue))
			return 1;
		else
			return 0;
	} else if (*sr & USART_SR_RXNE) {
		return 1;
	}
	return 0;
}

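/*
 * Fetch one received character.  With RX DMA the data sits in the
 * circular bounce buffer: the residue counts down from RX_BUF_L, so
 * RX_BUF_L - residue indexes the next unread byte and the index wraps
 * when the residue reaches zero.  Without DMA the character is read
 * from RDR and masked to the configured word length.
 */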
static unsigned long stm32_usart_get_char(struct uart_port *port, u32 *sr,
					  int *last_res)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;

	if (stm32_port->rx_ch) {
		c = stm32_port->rx_buf[RX_BUF_L - (*last_res)--];
		if ((*last_res) == 0)
			*last_res = RX_BUF_L;
	} else {
		c = readl_relaxed(port->membase + ofs->rdr);
		/* apply RDR data mask */
		c &= stm32_port->rdr_mask;
	}

	return c;
}

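/*
 * Drain all pending characters under the port lock: update the error
 * counters (overrun, parity, framing, break), translate the masked
 * status bits into a TTY flag, give sysrq a chance to consume the
 * character, then push the buffer to the TTY layer once the lock has
 * been released.
 */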
static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
{
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;
	u32 sr;
	char flag;

	spin_lock(&port->lock);

	while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
				      threaded)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits have to be cleared before reading the RDR:
		 * in FIFO mode, reading the RDR will pop the next data
		 * (if any) along with its status bits into the SR.
		 * Not doing so leads to misalignment between RDR and SR,
		 * and clears the status bits of the next RX data.
		 *
		 * Clear error flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_usart_get_char(port, &sr, &stm32_port->last_res);
		port->icount.rx++;
		if (sr & USART_SR_ERR_MASK) {
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		if (uart_handle_sysrq_char(port, c))
			continue;
		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	spin_unlock(&port->lock);

	tty_flip_buffer_push(tport);
}

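/*
 * TX DMA completion callback: tear down the finished transfer, clear
 * DMAT so the next transmission can fall back to PIO if needed, and
 * kick stm32_usart_transmit_chars() under the port lock in case more
 * data is already waiting in the circular buffer.
 */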
static void stm32_usart_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	unsigned long flags;

	dmaengine_terminate_async(stm32port->tx_ch);
	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	stm32port->tx_dma_busy = false;

	/* Let's see if we have pending data to send */
	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);
}

static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/*
	 * Enable the TX FIFO threshold irq when the FIFO is enabled,
	 * or the TX empty irq when the FIFO is disabled.
	 */
	if (stm32_port->fifoen)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->fifoen)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

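/*
 * PIO transmit path: if a DMA transfer was in flight, detach it first
 * (clear DMAT), then feed TDR as long as TXE reports room.  The TX
 * interrupt is left enabled only when characters remain to be sent.
 */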
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (stm32_port->tx_dma_busy) {
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		stm32_port->tx_dma_busy = false;
	}

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	/* rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
	else
		stm32_usart_tx_interrupt_enable(port);
}

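/*
 * DMA transmit path: up to TX_BUF_L bytes are copied out of the
 * circular buffer into the linear DMA bounce buffer (two memcpy() calls
 * when the data wraps), a single slave descriptor is submitted and DMAT
 * is set.  If the descriptor cannot be prepared or submitted, the data
 * is sent through the PIO path instead.
 */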
static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	unsigned int count, i;

	if (stm32port->tx_dma_busy)
		return;

	stm32port->tx_dma_busy = true;

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc)
		goto fallback_err;

	desc->callback = stm32_usart_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	if (dma_submit_error(dmaengine_submit(desc))) {
		/* DMA not yet started, safe to free resources */
		dmaengine_terminate_async(stm32port->tx_ch);
		goto fallback_err;
	}

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
	return;

fallback_err:
	for (i = count; i > 0; i--)
		stm32_usart_transmit_chars_pio(port);
}

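/*
 * Main transmit routine, called with the port lock held.  A pending
 * x_char (flow-control character) is sent first, bypassing DMA;
 * otherwise the TC flag is cleared and the data is handed to the DMA or
 * PIO path.  The write wakeup is signalled once the circular buffer
 * drops below WAKEUP_CHARS.
 */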
static void stm32_usart_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	u32 isr;
	int ret;

	if (port->x_char) {
		if (stm32_port->tx_dma_busy)
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

		/* Check that TDR is empty before filling FIFO */
		ret =
		readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						  isr,
						  (isr & USART_SR_TXE),
						  10, 1000);
		if (ret)
			dev_warn(port->dev, "1 character may be erased\n");

		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;
		if (stm32_port->tx_dma_busy)
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_usart_tx_interrupt_disable(port);
		return;
	}

	if (ofs->icr == UNDEF_REG)
		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_usart_transmit_chars_dma(port);
	else
		stm32_usart_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
}

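/*
 * Hard interrupt handler: clears the receiver-timeout and wake-up
 * flags, handles PIO RX/TX directly, and returns IRQ_WAKE_THREAD when
 * an RX DMA channel is in use so that the threaded handler below drains
 * the DMA buffer.
 */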
static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;

	sr = readl_relaxed(port->membase + ofs->isr);

	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
		/* Clear wake up flag and disable wake up interrupt */
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
			pm_wakeup_event(tport->tty->dev, 0);
	}

	if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
		stm32_usart_receive_chars(port, false);

	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
		spin_lock(&port->lock);
		stm32_usart_transmit_chars(port);
		spin_unlock(&port->lock);
	}

	if (stm32_port->rx_ch)
		return IRQ_WAKE_THREAD;
	else
		return IRQ_HANDLED;
}

static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct stm32_port *stm32_port = to_stm32_port(port);

	if (stm32_port->rx_ch)
		stm32_usart_receive_chars(port, true);

	return IRQ_HANDLED;
}

static unsigned int stm32_usart_tx_empty(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
		return TIOCSER_TEMT;

	return 0;
}

static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
	else
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);

	mctrl_gpio_set(stm32_port->gpios, mctrl);
}

static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int ret;

	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;

	return mctrl_gpio_get(stm32_port->gpios, &ret);
}

static void stm32_usart_enable_ms(struct uart_port *port)
{
	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
}

static void stm32_usart_disable_ms(struct uart_port *port)
{
	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}

/* Transmit stop */
static void stm32_usart_stop_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;

	stm32_usart_tx_interrupt_disable(port);

	if (rs485conf->flags & SER_RS485_ENABLED) {
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			mctrl_gpio_set(stm32_port->gpios,
					stm32_port->port.mctrl & ~TIOCM_RTS);
		} else {
			mctrl_gpio_set(stm32_port->gpios,
					stm32_port->port.mctrl | TIOCM_RTS);
		}
	}
}

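/*
 * In RS485 mode the driver-enable line may be an ordinary mctrl GPIO:
 * start_tx() asserts it (and stop_tx() above releases it) according to
 * the SER_RS485_RTS_ON_SEND polarity before characters are pushed out.
 */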
/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;
	struct circ_buf *xmit = &port->state->xmit;

	if (uart_circ_empty(xmit) && !port->x_char)
		return;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			mctrl_gpio_set(stm32_port->gpios,
					stm32_port->port.mctrl | TIOCM_RTS);
		} else {
			mctrl_gpio_set(stm32_port->gpios,
					stm32_port->port.mctrl & ~TIOCM_RTS);
		}
	}

	stm32_usart_transmit_chars(port);
}

/* Throttle the remote when input buffer is about to overflow. */
static void stm32_usart_throttle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_usart_unthrottle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Receive stop */
static void stm32_usart_stop_rx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}

/* Handle breaks - ignored by us */
static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}

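/*
 * Port startup: request the interrupt as a threaded IRQ (the threaded
 * part only runs on the RX DMA path), flush the RX FIFO through RQR
 * where that register exists, then enable the receiver, its interrupts
 * and the UART enable bit.
 */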
static int stm32_usart_startup(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	const char *name = to_platform_device(port->dev)->name;
	u32 val;
	int ret;

	ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
				   stm32_usart_threaded_interrupt,
				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
				   name, port);
	if (ret)
		return ret;

	/* RX FIFO Flush */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);

	/* RX enabling */
	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
	stm32_usart_set_bits(port, ofs->cr1, val);

	return 0;
}

static void stm32_usart_shutdown(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val, isr;
	int ret;

	/* Disable modem control interrupts */
	stm32_usart_disable_ms(port);

	val = USART_CR1_TXEIE | USART_CR1_TE;
	val |= stm32_port->cr1_irq | USART_CR1_RE;
	val |= BIT(cfg->uart_enable_bit);
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;

	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
					 isr, (isr & USART_SR_TC),
					 10, 100000);

	if (ret)
		dev_err(port->dev, "transmission complete not set\n");

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	stm32_usart_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}

static unsigned int stm32_usart_get_databits(struct ktermios *termios)
{
	unsigned int bits;

	tcflag_t cflag = termios->c_cflag;

	switch (cflag & CSIZE) {
	/*
	 * CSIZE settings are not necessarily supported in hardware.
	 * Unsupported CSIZE configurations are handled here by setting
	 * the word length to 8 bits as the default configuration.
	 */
	case CS5:
		bits = 5;
		break;
	case CS6:
		bits = 6;
		break;
	case CS7:
		bits = 7;
		break;
	/* default including CS8 */
	default:
		bits = 8;
		break;
	}

	return bits;
}

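/*
 * set_termios: the requested baud rate is computed first, then the
 * transmitter is drained (TC polled), the UART stopped and its FIFOs
 * flushed before CR1/CR2/CR3 are rebuilt from the termios settings
 * (word length, parity, stop bits, FIFO thresholds).
 */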
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) static void stm32_usart_set_termios(struct uart_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 				    struct ktermios *termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 				    struct ktermios *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	struct stm32_port *stm32_port = to_stm32_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	struct serial_rs485 *rs485conf = &port->rs485;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	unsigned int baud, bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	u32 usartdiv, mantissa, fraction, oversampling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	tcflag_t cflag = termios->c_cflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	u32 cr1, cr2, cr3, isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (!stm32_port->hw_flow_control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		cflag &= ~CRTSCTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 						isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 						(isr & USART_SR_TC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 						10, 100000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	/* Send the TC error message only when ISR_TC is not set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		dev_err(port->dev, "Transmission is not complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	/* Stop serial port and reset value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	writel_relaxed(0, port->membase + ofs->cr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	/* flush RX & TX FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (ofs->rqr != UNDEF_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			       port->membase + ofs->rqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	cr1 = USART_CR1_TE | USART_CR1_RE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (stm32_port->fifoen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		cr1 |= USART_CR1_FIFOEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	cr2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	/* Tx and RX FIFO configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	cr3 = readl_relaxed(port->membase + ofs->cr3);
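	/* Preserve only the FIFO threshold irq enable bits of CR3 */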
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	if (stm32_port->fifoen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		cr3 &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		cr3 |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		cr3 |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (cflag & CSTOPB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		cr2 |= USART_CR2_STOP_2B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	bits = stm32_usart_get_databits(termios);
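	/* rdr_mask keeps only the configured number of data bits from RDR */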
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	stm32_port->rdr_mask = (BIT(bits) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	if (cflag & PARENB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		bits++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		cr1 |= USART_CR1_PCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	 * Word length configuration:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	 * M0 and M1 already cleared by cr1 initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	if (bits == 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		cr1 |= USART_CR1_M0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	else if ((bits == 7) && cfg->has_7bits_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		cr1 |= USART_CR1_M1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	else if (bits != 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 				       stm32_port->fifoen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		if (cflag & CSTOPB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			bits = bits + 3; /* 1 start bit + 2 stop bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			bits = bits + 2; /* 1 start bit + 1 stop bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		/* RX timeout irq fires after one character time of idle line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		stm32_port->cr1_irq = USART_CR1_RTOIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		writel_relaxed(bits, port->membase + ofs->rtor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		cr2 |= USART_CR2_RTOEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		/* Not using dma, enable fifo threshold irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		if (!stm32_port->rx_ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			stm32_port->cr3_irq = USART_CR3_RXFTIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	cr1 |= stm32_port->cr1_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	cr3 |= stm32_port->cr3_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (cflag & PARODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		cr1 |= USART_CR1_PS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if (cflag & CRTSCTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	 * The USART supports 16 or 8 times oversampling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	 * By default we prefer 16 times oversampling, so that the receiver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	 * has a better tolerance to clock deviations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	 * 8 times oversampling is only used to achieve higher speeds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	 */
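	/*
	 * Illustrative example (assumed 16 MHz uartclk): at 115200 baud,
	 * usartdiv = DIV_ROUND_CLOSEST(16000000, 115200) = 139 >= 16, so 16x
	 * oversampling is kept and BRR gets mantissa 139 / 16 = 8 and
	 * fraction 139 % 16 = 11.
	 */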
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	if (usartdiv < 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		oversampling = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		cr1 |= USART_CR1_OVER8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		oversampling = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		cr1 &= ~USART_CR1_OVER8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	fraction = usartdiv % oversampling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	uart_update_timeout(port, cflag, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	port->read_status_mask = USART_SR_ORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	if (termios->c_iflag & INPCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		port->read_status_mask |= USART_SR_FE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	/* Characters to ignore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	port->ignore_status_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (termios->c_iflag & IGNBRK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		port->ignore_status_mask |= USART_SR_FE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		 * If we're ignoring parity and break indicators,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		 * ignore overruns too (for real raw support).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			port->ignore_status_mask |= USART_SR_ORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	/* Ignore all characters if CREAD is not set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	if ((termios->c_cflag & CREAD) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		port->ignore_status_mask |= USART_SR_DUMMY_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if (stm32_port->rx_ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		cr3 |= USART_CR3_DMAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	if (rs485conf->flags & SER_RS485_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		stm32_usart_config_reg_rs485(&cr1, &cr3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 					     rs485conf->delay_rts_before_send,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 					     rs485conf->delay_rts_after_send,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 					     baud);
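		/* DEP selects DE polarity: cleared means active-high DE */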
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			cr3 &= ~USART_CR3_DEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			cr3 |= USART_CR3_DEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	/* Configure wake up from low power on start bit detection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (stm32_port->wakeirq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		cr3 &= ~USART_CR3_WUS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		cr3 |= USART_CR3_WUS_START_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	writel_relaxed(cr3, port->membase + ofs->cr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	writel_relaxed(cr2, port->membase + ofs->cr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	writel_relaxed(cr1, port->membase + ofs->cr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	/* Handle modem control interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	if (UART_ENABLE_MS(port, termios->c_cflag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		stm32_usart_enable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		stm32_usart_disable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) static const char *stm32_usart_type(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) static void stm32_usart_release_port(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) static int stm32_usart_request_port(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) static void stm32_usart_config_port(struct uart_port *port, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (flags & UART_CONFIG_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		port->type = PORT_STM32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	/* No user changeable parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) static void stm32_usart_pm(struct uart_port *port, unsigned int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			   unsigned int oldstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	struct stm32_port *stm32port = container_of(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			struct stm32_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	case UART_PM_STATE_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		pm_runtime_get_sync(port->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	case UART_PM_STATE_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		pm_runtime_put_sync(port->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) static const struct uart_ops stm32_uart_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	.tx_empty	= stm32_usart_tx_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	.set_mctrl	= stm32_usart_set_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	.get_mctrl	= stm32_usart_get_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	.stop_tx	= stm32_usart_stop_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	.start_tx	= stm32_usart_start_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	.throttle	= stm32_usart_throttle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	.unthrottle	= stm32_usart_unthrottle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	.stop_rx	= stm32_usart_stop_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	.enable_ms	= stm32_usart_enable_ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	.break_ctl	= stm32_usart_break_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	.startup	= stm32_usart_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	.shutdown	= stm32_usart_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	.set_termios	= stm32_usart_set_termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	.pm		= stm32_usart_pm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	.type		= stm32_usart_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	.release_port	= stm32_usart_release_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	.request_port	= stm32_usart_request_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	.config_port	= stm32_usart_config_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	.verify_port	= stm32_usart_verify_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static int stm32_usart_init_port(struct stm32_port *stm32port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 				 struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	struct uart_port *port = &stm32port->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	ret = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		return ret ? : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	port->iotype	= UPIO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	port->flags	= UPF_BOOT_AUTOCONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	port->ops	= &stm32_uart_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	port->dev	= &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	port->fifosize	= stm32port->info->cfg.fifosize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	port->irq = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	port->rs485_config = stm32_usart_config_rs485;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	ret = stm32_usart_init_rs485(port, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (stm32port->info->cfg.has_wakeup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		stm32port->wakeirq = platform_get_irq_optional(pdev, 1);
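		/* Wakeup irq is optional: -ENXIO means none is declared */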
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			return stm32port->wakeirq ? : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	stm32port->fifoen = stm32port->info->cfg.has_fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	port->membase = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	if (IS_ERR(port->membase))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		return PTR_ERR(port->membase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	port->mapbase = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	spin_lock_init(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (IS_ERR(stm32port->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		return PTR_ERR(stm32port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	/* Ensure that clk rate is correct by enabling the clk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	ret = clk_prepare_enable(stm32port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (!stm32port->port.uartclk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	if (IS_ERR(stm32port->gpios)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		ret = PTR_ERR(stm32port->gpios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	/* Both CTS/RTS gpios and "st,hw-flow-ctrl" should not be specified */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	if (stm32port->hw_flow_control) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) err_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	clk_disable_unprepare(stm32port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	id = of_alias_get_id(np, "serial");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	if (id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (WARN_ON(id >= STM32_MAX_PORTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	stm32_ports[id].hw_flow_control =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		of_property_read_bool(np, "st,hw-flow-ctrl") /* deprecated */ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		of_property_read_bool(np, "uart-has-rtscts");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	stm32_ports[id].port.line = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	stm32_ports[id].cr3_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	stm32_ports[id].last_res = RX_BUF_L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	return &stm32_ports[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static const struct of_device_id stm32_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) MODULE_DEVICE_TABLE(of, stm32_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 				       struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	struct uart_port *port = &stm32port->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	struct dma_slave_config config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	struct dma_async_tx_descriptor *desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	 * Using DMA and threaded handler for the console could lead to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	 * deadlocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	if (uart_console(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	/* Request DMA RX channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	if (!stm32port->rx_ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		dev_info(dev, "rx dma alloc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 					       &stm32port->rx_dma_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 					       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	if (!stm32port->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		goto alloc_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	/* Configure DMA channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	memset(&config, 0, sizeof(config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	config.src_addr = port->mapbase + ofs->rdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		dev_err(dev, "rx dma channel config failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		goto config_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	/* Prepare a DMA cyclic transaction */
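	/* (buffer of RX_BUF_L bytes, split into RX_BUF_P byte periods) */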
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	desc = dmaengine_prep_dma_cyclic(stm32port->rx_ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 					 stm32port->rx_dma_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 					 RX_BUF_L, RX_BUF_P, DMA_DEV_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 					 DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		dev_err(dev, "rx dma prep cyclic failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		goto config_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	/* No callback as dma buffer is drained on usart interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	desc->callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	desc->callback_param = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	/* Push current DMA transaction in the pending queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	ret = dma_submit_error(dmaengine_submit(desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		dmaengine_terminate_sync(stm32port->rx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		goto config_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	/* Issue pending DMA requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	dma_async_issue_pending(stm32port->rx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) config_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			  RX_BUF_L, stm32port->rx_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			  stm32port->rx_dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) alloc_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	dma_release_channel(stm32port->rx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	stm32port->rx_ch = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 				       struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	struct uart_port *port = &stm32port->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	struct dma_slave_config config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	stm32port->tx_dma_busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	/* Request DMA TX channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	stm32port->tx_ch = dma_request_slave_channel(dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	if (!stm32port->tx_ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		dev_info(dev, "tx dma alloc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 					       &stm32port->tx_dma_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 					       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	if (!stm32port->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		goto alloc_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	/* Configure DMA channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	memset(&config, 0, sizeof(config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	config.dst_addr = port->mapbase + ofs->tdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		dev_err(dev, "tx dma channel config failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		goto config_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) config_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			  TX_BUF_L, stm32port->tx_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			  stm32port->tx_dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) alloc_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	dma_release_channel(stm32port->tx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	stm32port->tx_ch = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) static int stm32_usart_serial_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	struct stm32_port *stm32port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	stm32port = stm32_usart_of_get_port(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (!stm32port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	stm32port->info = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	if (!stm32port->info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	ret = stm32_usart_init_port(stm32port, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	if (stm32port->wakeirq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		ret = device_init_wakeup(&pdev->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 			goto err_uninit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 						    stm32port->wakeirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 			goto err_nowup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
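		/* Wakeup stays disabled by default; sysfs can enable it */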
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		device_set_wakeup_enable(&pdev->dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	ret = stm32_usart_of_dma_tx_probe(stm32port, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		dev_info(&pdev->dev, "interrupt mode used for tx (no dma)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	platform_set_drvdata(pdev, &stm32port->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
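	/* Keep the device powered until the uart port has been registered */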
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	pm_runtime_get_noresume(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	pm_runtime_set_active(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		goto err_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	pm_runtime_put_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) err_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	pm_runtime_set_suspended(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	if (stm32port->rx_ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		dmaengine_terminate_async(stm32port->rx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		dma_release_channel(stm32port->rx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	if (stm32port->rx_dma_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 				  RX_BUF_L, stm32port->rx_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 				  stm32port->rx_dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	if (stm32port->tx_ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		dmaengine_terminate_async(stm32port->tx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		dma_release_channel(stm32port->tx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (stm32port->tx_dma_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				  TX_BUF_L, stm32port->tx_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 				  stm32port->tx_dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	if (stm32port->wakeirq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		dev_pm_clear_wake_irq(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) err_nowup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	if (stm32port->wakeirq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		device_init_wakeup(&pdev->dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) err_uninit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	clk_disable_unprepare(stm32port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static int stm32_usart_serial_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	struct uart_port *port = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	struct stm32_port *stm32_port = to_stm32_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	err = uart_remove_one_port(&stm32_usart_driver, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	pm_runtime_set_suspended(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	if (stm32_port->rx_ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		dmaengine_terminate_async(stm32_port->rx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		dma_release_channel(stm32_port->rx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	if (stm32_port->rx_dma_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 				  RX_BUF_L, stm32_port->rx_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 				  stm32_port->rx_dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	if (stm32_port->tx_ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		dmaengine_terminate_async(stm32_port->tx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		dma_release_channel(stm32_port->tx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	if (stm32_port->tx_dma_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 				  TX_BUF_L, stm32_port->tx_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 				  stm32_port->tx_dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (stm32_port->wakeirq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		dev_pm_clear_wake_irq(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		device_init_wakeup(&pdev->dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	clk_disable_unprepare(stm32_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) #ifdef CONFIG_SERIAL_STM32_CONSOLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static void stm32_usart_console_putchar(struct uart_port *port, int ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	struct stm32_port *stm32_port = to_stm32_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
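	/* Busy-wait until the transmit data register is empty */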
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	writel_relaxed(ch, port->membase + ofs->tdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) static void stm32_usart_console_write(struct console *co, const char *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 				      unsigned int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	struct uart_port *port = &stm32_ports[co->index].port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	struct stm32_port *stm32_port = to_stm32_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	u32 old_cr1, new_cr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	int locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
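	/*
	 * Do not take the port lock if it may already be held (sysrq handling
	 * or an oops in progress), so that console output cannot deadlock.
	 */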
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (port->sysrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	else if (oops_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		locked = spin_trylock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	/* Save and disable interrupts, enable the transmitter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	new_cr1 |=  USART_CR1_TE | BIT(cfg->uart_enable_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	writel_relaxed(new_cr1, port->membase + ofs->cr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	uart_console_write(port, s, cnt, stm32_usart_console_putchar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	/* Restore interrupt state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	writel_relaxed(old_cr1, port->membase + ofs->cr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	if (locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static int stm32_usart_console_setup(struct console *co, char *options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	struct stm32_port *stm32port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	int baud = 9600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	int bits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	int parity = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	int flow = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	if (co->index >= STM32_MAX_PORTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	stm32port = &stm32_ports[co->index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	 * This driver does not support early console initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	 * (use ARM early printk support instead), so we only expect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	 * this to be called during the uart port registration when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	 * driver gets probed and the port should be mapped at that point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	if (options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		uart_parse_options(options, &baud, &parity, &bits, &flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static struct console stm32_console = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	.name		= STM32_SERIAL_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	.device		= uart_console_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	.write		= stm32_usart_console_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	.setup		= stm32_usart_console_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	.flags		= CON_PRINTBUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	.index		= -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	.data		= &stm32_usart_driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) #define STM32_SERIAL_CONSOLE (&stm32_console)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) #define STM32_SERIAL_CONSOLE NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) #endif /* CONFIG_SERIAL_STM32_CONSOLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static struct uart_driver stm32_usart_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	.driver_name	= DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	.dev_name	= STM32_SERIAL_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	.major		= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	.minor		= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	.nr		= STM32_MAX_PORTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	.cons		= STM32_SERIAL_CONSOLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 							bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	struct stm32_port *stm32_port = to_stm32_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	if (stm32_port->wakeirq <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	 * Enable low-power wake-up and wake-up irq if argument is set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	 * "enable", disable low-power wake-up and wake-up irq otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);

	uart_suspend_port(&stm32_usart_driver, port);

	stm32_usart_serial_en_wakeup(port, device_may_wakeup(dev));

	/*
	 * When "no_console_suspend" is used, keep the pinctrl default state
	 * and rely on the bootloader stage to restore it upon resume.
	 * Otherwise, apply the idle or sleep state depending on the wakeup
	 * capability.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}

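/*
 * System-sleep resume: restore the default pin state first so the lines
 * are usable again, disarm the wake-up logic if it was armed on suspend,
 * then bring the port back up through uart_resume_port().
 */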
static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev))
		stm32_usart_serial_en_wakeup(port, false);

	return uart_resume_port(&stm32_usart_driver, port);
}

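/*
 * Runtime PM suspend: gate the USART kernel clock while the device is
 * runtime idle; no other state needs to be saved here.
 */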
static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = to_stm32_port(port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

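/*
 * Runtime PM resume: re-enable the USART kernel clock. The return value is
 * propagated so runtime resume fails cleanly if the clock cannot be
 * enabled.
 */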
static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = to_stm32_port(port);

	return clk_prepare_enable(stm32port->clk);
}

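/*
 * Both the runtime PM and the system sleep callbacks are wired up here.
 * SET_RUNTIME_PM_OPS() and SET_SYSTEM_SLEEP_PM_OPS() expand to nothing when
 * CONFIG_PM / CONFIG_PM_SLEEP are disabled, which is why the callbacks
 * above are marked __maybe_unused.
 */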
static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

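/*
 * Platform glue: the driver binds against the device-tree compatibles
 * listed in stm32_match. of_match_ptr() degrades to NULL when CONFIG_OF is
 * disabled, so the match table is only referenced on DT-enabled builds.
 */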
static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};

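/*
 * Module init: register the serial core driver first, then the platform
 * driver. If the platform driver fails to register, the uart driver is
 * unregistered again so the module load unwinds cleanly.
 */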
static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

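/*
 * Module exit: tear down in the reverse order of stm32_usart_init().
 */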
static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

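/*
 * The "platform:" MODULE_ALIAS lets udev autoload this module when a
 * matching platform device is created. Once loaded, the console is
 * typically selected with something like console=ttySTM0,115200 on the
 * kernel command line, assuming the "ttySTM" device name set in
 * stm32_usart_driver earlier in this file.
 */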
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");