Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0+
/*
 *  Driver for AMBA serial ports
 *
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  Copyright 1999 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#include "amba-pl011.h"

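/* Maximum number of UART ports this driver supports */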
#define UART_NR			14

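/* Device numbers for the ttyAMA nodes: character major 204, minors from 64 */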
#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

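/* Limit on the number of passes the interrupt handler makes before bailing out */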
#define AMBA_ISR_PASS_LIMIT	256

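/*
 * All four error bits from a data register read, plus a dummy marker bit
 * OR'd into each character taken from the FIFO so the value can be told
 * apart from an empty read.
 */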
#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)

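/* Register offsets for the standard ARM PL011 implementation */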
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	unsigned int		inv_fr;
	bool			access_32b;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};

static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};

static const struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};

#ifdef CONFIG_ACPI_SPCR_TABLE
static const struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART011_FR_TXFE,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.inv_fr			= UART011_FR_TXFE,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
#endif

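/*
 * ST variants split LCRH into separate RX and TX registers and add
 * vendor-specific registers (DMA watermark, timeout, software flow
 * control and autobaud).
 */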
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};

static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};

static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = ZX_UART011_DR,
	[REG_FR] = ZX_UART011_FR,
	[REG_LCRH_RX] = ZX_UART011_LCRH,
	[REG_LCRH_TX] = ZX_UART011_LCRH,
	[REG_IBRD] = ZX_UART011_IBRD,
	[REG_FBRD] = ZX_UART011_FBRD,
	[REG_CR] = ZX_UART011_CR,
	[REG_IFLS] = ZX_UART011_IFLS,
	[REG_IMSC] = ZX_UART011_IMSC,
	[REG_RIS] = ZX_UART011_RIS,
	[REG_MIS] = ZX_UART011_MIS,
	[REG_ICR] = ZX_UART011_ICR,
	[REG_DMACR] = ZX_UART011_DMACR,
};

static unsigned int get_fifosize_zte(struct amba_device *dev)
{
	return 16;
}

static struct vendor_data vendor_zte = {
	.reg_offset		= pl011_zte_offsets,
	.access_32b		= true,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= ZX_UART01x_FR_BUSY,
	.fr_dsr			= ZX_UART01x_FR_DSR,
	.fr_cts			= ZX_UART01x_FR_CTS,
	.fr_ri			= ZX_UART011_FR_RI,
	.get_fifosize		= get_fifosize_zte,
};

/* Deals with DMA transactions */

struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

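/*
 * RX DMA is double-buffered: buffers A and B are handed to the DMA engine
 * in turn (use_buf_b tracks which one is in flight), while a timer polls
 * the channel so slowly arriving data is not left stranded in a buffer.
 */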
struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int last_residue;
	unsigned long last_jiffies;
	bool auto_poll_rate;
	unsigned int poll_rate;
	unsigned int poll_timeout;
};

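/* TX DMA uses a single bounce buffer filled from the port's circular buffer */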
struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};

static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
	unsigned int reg)
{
	return uap->reg_offset[reg];
}

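/*
 * All register accesses go through the vendor's offset table, and the
 * access width follows the port iotype: 32-bit for UPIO_MEM32 (eg, SBSA
 * and ZTE parts), 16-bit otherwise.
 */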
static unsigned int pl011_read(const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}

static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	unsigned int ch, flag, fifotaken;
	int sysrq;
	u16 status;

	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

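		/*
		 * The sysrq handler may take the port lock again (eg, if
		 * this port is the console), so drop it across the call.
		 */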
		spin_unlock(&uap->port.lock);
		sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
		spin_lock(&uap->port.lock);

		if (!sysrq)
			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

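/*
 * An RX DMA buffer is a single page of coherent memory wrapped in a
 * one-entry scatterlist so it can be handed to the dmaengine API.
 */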
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}

static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

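	/* Assume probed up front; only a probe deferral below undoes this */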
	uap->dma_probed = true;
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
						plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing;
		 * otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					"RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * Default to a 100 ms poll rate if none is
				 * specified. This is adjusted to match the
				 * baud rate in set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* Default poll_timeout to 3 seconds if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
						"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
						"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the TX queued status to
	 * get further refills (hence we check dmacr).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	    uart_circ_empty(&uap->port.state->xmit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		uap->dmatx.queued = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		spin_unlock_irqrestore(&uap->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	if (pl011_dma_tx_refill(uap) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		 * We didn't queue a DMA buffer for some reason, but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		 * have data pending to be sent.  Re-enable the TX IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		pl011_start_tx_pio(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	spin_unlock_irqrestore(&uap->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589)  * Try to refill the TX DMA buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590)  * Locking: called with port lock held and IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592)  *   1 if we queued up a TX DMA buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593)  *   0 if we didn't want to handle this by DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594)  *  <0 on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) static int pl011_dma_tx_refill(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	struct pl011_dmatx_data *dmatx = &uap->dmatx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	struct dma_chan *chan = dmatx->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	struct dma_device *dma_dev = chan->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	struct circ_buf *xmit = &uap->port.state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	 * Try to avoid the overhead involved in using DMA if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	 * transaction fits in the first half of the FIFO, by using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	 * the standard interrupt handling.  This ensures that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	 * issue a uart_write_wakeup() at the appropriate time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	count = uart_circ_chars_pending(xmit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	if (count < (uap->fifosize >> 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		uap->dmatx.queued = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	 * Bodge: don't send the last character by DMA, as this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	 * will prevent XON from notifying us to restart DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	count -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	if (count > PL011_DMA_BUFFER_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		count = PL011_DMA_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	if (xmit->tail < xmit->head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		size_t first = UART_XMIT_SIZE - xmit->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		size_t second;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		if (first > count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 			first = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		second = count - first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		if (second)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	dmatx->sg.length = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		uap->dmatx.queued = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		uap->dmatx.queued = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		 * If DMA cannot be used right now, we complete this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		 * transaction via IRQ and let the TTY layer retry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		dev_dbg(uap->port.dev, "TX DMA busy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	/* Some data to go along to the callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	desc->callback = pl011_dma_tx_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	desc->callback_param = uap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	/* All errors should happen at prepare time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	/* Fire the DMA transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	dma_dev->device_issue_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	uap->dmacr |= UART011_TXDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	uap->dmatx.queued = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	 * Now we know that DMA will fire, so advance the ring buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	 * with the stuff we just dispatched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	uap->port.icount.tx += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		uart_write_wakeup(&uap->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691)  * We received a transmit interrupt without a pending X-char but with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692)  * pending characters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693)  * Locking: called with port lock held and IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695)  *   false if we want to use PIO to transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696)  *   true if we queued a DMA buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	if (!uap->using_tx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	 * If we already have a TX buffer queued, but received a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	 * TX interrupt, it will be because we've just sent an X-char.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	if (uap->dmatx.queued) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		uap->dmacr |= UART011_TXDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		uap->im &= ~UART011_TXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	 * We don't have a TX buffer queued, so try to queue one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	 * If we successfully queued a buffer, mask the TX IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	if (pl011_dma_tx_refill(uap) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		uap->im &= ~UART011_TXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729)  * Stop the DMA transmit (eg, due to received XOFF).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730)  * Locking: called with port lock held and IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	if (uap->dmatx.queued) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		uap->dmacr &= ~UART011_TXDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  * Try to start a DMA transmit, or in the case of an XON/OFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  * character queued for send, try to get that character out ASAP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  * Locking: called with port lock held and IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  *   false if we want the TX IRQ to be enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  *   true if we have a buffer queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	u16 dmacr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	if (!uap->using_tx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	if (!uap->port.x_char) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		/* no X-char, try to push chars out in DMA mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		bool ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		if (!uap->dmatx.queued) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			if (pl011_dma_tx_refill(uap) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 				uap->im &= ~UART011_TXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 				pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 				ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		} else if (!(uap->dmacr & UART011_TXDMAE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			uap->dmacr |= UART011_TXDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	 * We have an X-char to send.  Disable DMA to prevent it loading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	 * the TX fifo, and then see if we can stuff it into the FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	dmacr = uap->dmacr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	uap->dmacr &= ~UART011_TXDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		 * No space in the FIFO, so enable the transmit interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		 * so we know when there is space.  Note that once we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		 * loaded the character, we should just re-enable DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	pl011_write(uap->port.x_char, uap, REG_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	uap->port.icount.tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	uap->port.x_char = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	/* Success - restore the DMA state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	uap->dmacr = dmacr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	pl011_write(dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801)  * Flush the transmit buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802)  * Locking: called with port lock held and IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) static void pl011_dma_flush_buffer(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) __releases(&uap->port.lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) __acquires(&uap->port.lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	if (!uap->using_tx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
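	/*
	 * Terminate any in-flight TX descriptor without waiting; the
	 * TX completion callback takes the port lock, which is held
	 * here, so it cannot race with the unmap below.
	 */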
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	dmaengine_terminate_async(uap->dmatx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (uap->dmatx.queued) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			     DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		uap->dmatx.queued = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		uap->dmacr &= ~UART011_TXDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) static void pl011_dma_rx_callback(void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
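/*
 * Set up and submit a descriptor for the current RX sg buffer, enable
 * the UART's RX DMA request and mask the RX interrupt, so the DMA
 * engine owns reception until completion or a FIFO timeout.
 * Returns 0 on success, or a negative errno if no channel exists or
 * the engine cannot prepare a descriptor (callers then fall back to
 * interrupt mode).
 */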
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	struct dma_chan *rxchan = uap->dmarx.chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	struct pl011_sgbuf *sgbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	if (!rxchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	/* Start the RX DMA job */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	sgbuf = uap->dmarx.use_buf_b ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 					DMA_DEV_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	/*
	 * If the DMA engine is busy and cannot prepare a
	 * descriptor, no big deal: the driver will fall back
	 * to interrupt mode as a result of this error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		uap->dmarx.running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		dmaengine_terminate_all(rxchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
	/* Pass some data along to the callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	desc->callback = pl011_dma_rx_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	desc->callback_param = uap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	dmarx->cookie = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	dma_async_issue_pending(rxchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
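	/*
	 * Enable the UART's RX DMA request and mask the RX interrupt:
	 * incoming characters now drain through the DMA engine instead
	 * of the RX IRQ path.
	 */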
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	uap->dmacr |= UART011_RXDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	uap->dmarx.running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	uap->im &= ~UART011_RXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt has occurred. This must be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  * with the port spinlock uap->port.lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) static void pl011_dma_rx_chars(struct uart_amba_port *uap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			       u32 pending, bool use_buf_b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			       bool readfifo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	struct tty_port *port = &uap->port.state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct pl011_sgbuf *sgbuf = use_buf_b ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	int dma_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	u32 fifotaken = 0; /* only used for vdbg() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	int dmataken = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	if (uap->dmarx.poll_rate) {
		/* Some of the data may already have been taken by polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		dmataken = sgbuf->sg.length - dmarx->last_residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		/* Recalculate the pending size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		if (pending >= dmataken)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			pending -= dmataken;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
	/* Pick up the remaining data from the DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if (pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many chars
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		 * as it can.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 				pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		uap->port.icount.rx += dma_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		if (dma_count < pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			dev_warn(uap->port.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 				 "couldn't insert all characters (TTY is full?)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	/* Reset the last_residue for Rx DMA poll */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (uap->dmarx.poll_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		dmarx->last_residue = sgbuf->sg.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	 * Only continue with trying to read the FIFO if all DMA chars have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	 * been taken first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	if (dma_count == pending && readfifo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		/* Clear any error flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			    UART011_FEIS, uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		 * If we read all the DMA'd characters, and we had an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		 * incomplete buffer, that could be due to an rx error, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		 * maybe we just timed out. Read any pending chars and check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		 * the error status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		 * Error conditions will only occur in the FIFO, these will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		 * trigger an immediate interrupt and stop the DMA job, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		 * will always find the error in the FIFO, never in the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		 * buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		fifotaken = pl011_fifo_to_tty(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	spin_unlock(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	dev_vdbg(uap->port.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		 dma_count, fifotaken);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	tty_flip_buffer_push(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	spin_lock(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) static void pl011_dma_rx_irq(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	struct dma_chan *rxchan = dmarx->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	size_t pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	enum dma_status dmastat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
	/*
	 * Pause the transfer so we can trust the current counter; do
	 * this before we pause the PL011 block, else we may overflow
	 * the FIFO.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (dmaengine_pause(rxchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	dmastat = rxchan->device->device_tx_status(rxchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 						   dmarx->cookie, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	if (dmastat != DMA_PAUSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	/* Disable RX DMA - incoming data will wait in the FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	uap->dmacr &= ~UART011_RXDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	uap->dmarx.running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
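	/*
	 * The amount received is the sg buffer length minus the
	 * residue the engine reports for the paused transfer.
	 */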
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	pending = sgbuf->sg.length - state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	/* Then we terminate the transfer - we now know our residue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	dmaengine_terminate_all(rxchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	/*
	 * This will take the chars we have so far and insert
	 * them into the TTY framework.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	/* Switch buffer & re-trigger DMA job */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	dmarx->use_buf_b = !dmarx->use_buf_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, falling back to interrupt mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		uap->im |= UART011_RXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) static void pl011_dma_rx_callback(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	struct uart_amba_port *uap = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct dma_chan *rxchan = dmarx->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	bool lastbuf = dmarx->use_buf_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	size_t pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	 * This completion interrupt occurs typically when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	 * RX buffer is totally stuffed but no timeout has yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	 * occurred. When that happens, we just want the RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 * routine to flush out the secondary DMA buffer while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 * we immediately trigger the next DMA job.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	spin_lock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	/*
	 * RX data may have been taken by the UART interrupts while
	 * the DMA IRQ handler ran, so check the residue here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	pending = sgbuf->sg.length - state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	/* Then we terminate the transfer - we now know our residue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	dmaengine_terminate_all(rxchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
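	/*
	 * Switch to the other buffer and restart DMA before draining
	 * what was just received, so reception can continue while the
	 * completed buffer is pushed to the TTY.
	 */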
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	uap->dmarx.running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	dmarx->use_buf_b = !lastbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	ret = pl011_dma_rx_trigger_dma(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	pl011_dma_rx_chars(uap, pending, lastbuf, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	spin_unlock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	/*
	 * Do this check after picking up the DMA chars, so we don't
	 * get an RX IRQ immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if (ret) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, falling back to interrupt mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		uap->im |= UART011_RXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)  * Stop accepting received characters, when we're shutting down or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)  * suspending this port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  * Locking: called with port lock held and IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	/* FIXME.  Just disable the DMA enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	uap->dmacr &= ~UART011_RXDMAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /*
 * Timer handler for RX DMA polling.
 * On each poll it checks the residue in the DMA buffer and transfers
 * data to the TTY. last_residue is also updated for the next poll.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static void pl011_dma_rx_poll(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	struct tty_port *port = &uap->port.state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	struct dma_chan *rxchan = uap->dmarx.chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	unsigned int dmataken = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	unsigned int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	struct pl011_sgbuf *sgbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	int dma_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
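	/*
	 * If the residue shrank since the last poll, new data arrived:
	 * copy the delta, starting where the previous poll left off.
	 */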
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (likely(state.residue < dmarx->last_residue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		dmataken = sgbuf->sg.length - dmarx->last_residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		size = dmarx->last_residue - state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 				size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		if (dma_count == size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			dmarx->last_residue =  state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		dmarx->last_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	tty_flip_buffer_push(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	 * If no data is received in poll_timeout, the driver will fall back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	 * to interrupt mode. We will retrigger DMA at the first interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			> uap->dmarx.poll_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		spin_lock_irqsave(&uap->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		pl011_dma_rx_stop(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		uap->im |= UART011_RXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		spin_unlock_irqrestore(&uap->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		uap->dmarx.running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		dmaengine_terminate_all(rxchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		del_timer(&uap->dmarx.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		mod_timer(&uap->dmarx.timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static void pl011_dma_startup(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	if (!uap->dma_probed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		pl011_dma_probe(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	if (!uap->dmatx.chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
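	/*
	 * The TX bounce buffer is allocated with __GFP_DMA, presumably
	 * so it stays addressable by DMA controllers limited to the low
	 * DMA zone.
	 */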
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	if (!uap->dmatx.buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		uap->port.fifosize = uap->fifosize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	/* The DMA buffer is now the FIFO the TTY subsystem can use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	uap->using_tx_dma = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	if (!uap->dmarx.chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		goto skip_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	/* Allocate and map DMA RX buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			       DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			"RX buffer A", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		goto skip_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			       DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			"RX buffer B", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 				 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		goto skip_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	uap->using_rx_dma = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) skip_rx:
	/* Turn on DMA-on-error; RX/TX DMA itself is enabled on demand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	uap->dmacr |= UART011_DMAONERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	/*
	 * ST Micro variants have a specific DMA burst threshold
	 * compensation. Set this to 16 bytes, so bursts will only
	 * be issued above/below 16 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (uap->vendor->dma_threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			    uap, REG_ST_DMAWM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	if (uap->using_rx_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev,
				"could not trigger initial RX DMA job, falling back to interrupt mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		if (uap->dmarx.poll_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			mod_timer(&uap->dmarx.timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 				jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 				msecs_to_jiffies(uap->dmarx.poll_rate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 			uap->dmarx.last_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static void pl011_dma_shutdown(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	if (!(uap->using_tx_dma || uap->using_rx_dma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
	/* Wait for the UART to go idle, then disable RX and TX DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	spin_lock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	pl011_write(uap->dmacr, uap, REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	spin_unlock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	if (uap->using_tx_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		/* In theory, this should already be done by pl011_dma_flush_buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		dmaengine_terminate_all(uap->dmatx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		if (uap->dmatx.queued) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 				     DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			uap->dmatx.queued = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		kfree(uap->dmatx.buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		uap->using_tx_dma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	if (uap->using_rx_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		dmaengine_terminate_all(uap->dmarx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		/* Clean up the RX DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		if (uap->dmarx.poll_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 			del_timer_sync(&uap->dmarx.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		uap->using_rx_dma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	return uap->using_rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	return uap->using_rx_dma && uap->dmarx.running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Blank functions if the DMA engine is not available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static inline void pl011_dma_remove(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static inline void pl011_dma_startup(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
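/*
 * The serial core only invokes ->flush_buffer when it is non-NULL, so
 * a NULL hook is a safe no-op without DMA.
 */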
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) #define pl011_dma_flush_buffer	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static void pl011_stop_tx(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	uap->im &= ~UART011_TXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	pl011_dma_tx_stop(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) /* Start TX with programmed I/O only (no DMA) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) static void pl011_start_tx_pio(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
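	/*
	 * pl011_tx_chars() returns true when characters remain, in which
	 * case the TX interrupt is unmasked so we are told when the FIFO
	 * has room again.
	 */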
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (pl011_tx_chars(uap, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		uap->im |= UART011_TXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static void pl011_start_tx(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (!pl011_dma_tx_start(uap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		pl011_start_tx_pio(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static void pl011_stop_rx(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	pl011_dma_rx_stop(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static void pl011_enable_ms(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static void pl011_rx_chars(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) __releases(&uap->port.lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) __acquires(&uap->port.lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	pl011_fifo_to_tty(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	spin_unlock(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	tty_flip_buffer_push(&uap->port.state->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	/*
	 * If we were temporarily out of DMA mode, attempt to
	 * switch back to DMA mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	if (pl011_dma_rx_available(uap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev,
				"could not trigger RX DMA job, falling back to interrupt mode again\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			uap->im |= UART011_RXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 			pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) #ifdef CONFIG_DMA_ENGINE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			/* Start Rx DMA poll */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			if (uap->dmarx.poll_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 				uap->dmarx.last_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 				uap->dmarx.last_residue	= PL011_DMA_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 				mod_timer(&uap->dmarx.timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 					jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 					msecs_to_jiffies(uap->dmarx.poll_rate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	spin_lock(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			  bool from_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	if (unlikely(!from_irq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		return false; /* unable to transmit character */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	pl011_write(c, uap, REG_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	uap->port.icount.tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
/* Returns true if TX interrupts have to be (kept) enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	struct circ_buf *xmit = &uap->port.state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	int count = uap->fifosize >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (uap->port.x_char) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		uap->port.x_char = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		--count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		pl011_stop_tx(&uap->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	/* If we are using DMA mode, try to send some characters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	if (pl011_dma_tx_irq(uap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
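	/*
	 * From IRQ context, up to half the FIFO is written without
	 * checking TXFF: the TX interrupt fires at the FIFO watermark
	 * (half full by default), so that much space should be free.
	 */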
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		if (likely(from_irq) && count-- == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	} while (!uart_circ_empty(xmit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		uart_write_wakeup(&uap->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	if (uart_circ_empty(xmit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		pl011_stop_tx(&uap->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static void pl011_modem_status(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	unsigned int status, delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	delta = status ^ uap->old_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	uap->old_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	if (!delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if (delta & UART01x_FR_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	if (delta & uap->vendor->fr_dsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		uap->port.icount.dsr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	if (delta & uap->vendor->fr_cts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		uart_handle_cts_change(&uap->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 				       status & uap->vendor->fr_cts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	if (!uap->vendor->cts_event_workaround)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
	/* Workaround to make sure that all bits are unlocked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	pl011_write(0x00, uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	/*
	 * Workaround: introduce a 26ns (1 UART clock) delay before W1C;
	 * a single APB access incurs a 2 pclk (133.12MHz) delay,
	 * so add two dummy reads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	pl011_read(uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	pl011_read(uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static irqreturn_t pl011_int(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	struct uart_amba_port *uap = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	spin_lock_irqsave(&uap->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	status = pl011_read(uap, REG_RIS) & uap->im;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	if (status) {
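		/*
		 * Keep servicing sources until none remain, bounded by
		 * AMBA_ISR_PASS_LIMIT so a stuck interrupt cannot wedge
		 * the CPU forever.
		 */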
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			check_apply_cts_event_workaround(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 					       UART011_RXIS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 				    uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			if (status & (UART011_RTIS|UART011_RXIS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 				if (pl011_dma_rx_running(uap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 					pl011_dma_rx_irq(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 					pl011_rx_chars(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 			if (status & (UART011_DSRMIS|UART011_DCDMIS|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 				      UART011_CTSMIS|UART011_RIMIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 				pl011_modem_status(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			if (status & UART011_TXIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 				pl011_tx_chars(uap, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			if (pass_counter-- == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			status = pl011_read(uap, REG_RIS) & uap->im;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		} while (status != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	spin_unlock_irqrestore(&uap->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) static unsigned int pl011_tx_empty(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	/* Allow feature register bits to be inverted to work around errata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 							0 : TIOCSER_TEMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static unsigned int pl011_get_mctrl(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	unsigned int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	unsigned int status = pl011_read(uap, REG_FR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
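/* Map each set feature-register bit onto the corresponding TIOCM flag */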
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) #define TIOCMBIT(uartbit, tiocmbit)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	if (status & uartbit)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		result |= tiocmbit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) #undef TIOCMBIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	unsigned int cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	cr = pl011_read(uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) #define	TIOCMBIT(tiocmbit, uartbit)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (mctrl & tiocmbit)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		cr |= uartbit;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	else				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		cr &= ~uartbit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	if (port->status & UPSTAT_AUTORTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		/* We need to disable auto-RTS if we want to turn RTS off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) #undef TIOCMBIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	pl011_write(cr, uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static void pl011_break_ctl(struct uart_port *port, int break_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	unsigned int lcr_h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	spin_lock_irqsave(&uap->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	lcr_h = pl011_read(uap, REG_LCRH_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	if (break_state == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		lcr_h |= UART01x_LCRH_BRK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		lcr_h &= ~UART01x_LCRH_BRK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	pl011_write(lcr_h, uap, REG_LCRH_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	spin_unlock_irqrestore(&uap->port.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) #ifdef CONFIG_CONSOLE_POLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static void pl011_quiesce_irqs(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	 * we simply mask it. start_tx() will unmask it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	 * Note we can race with start_tx(), and if the race happens, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	 * polling user might get another interrupt just after we clear it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	 * But it should be OK and can happen even w/o the race, e.g. the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	 * controller immediately got some new data and raised the IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	 * And whoever uses polling routines assumes that it manages the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	 * (including tx queue), so we're also fine with start_tx()'s caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	 * side.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		    REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) static int pl011_get_poll_char(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	unsigned int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	 * debugger.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	pl011_quiesce_irqs(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	status = pl011_read(uap, REG_FR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	if (status & UART01x_FR_RXFE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		return NO_POLL_CHAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	return pl011_read(uap, REG_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static void pl011_put_poll_char(struct uart_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			 unsigned char ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	pl011_write(ch, uap, REG_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) #endif /* CONFIG_CONSOLE_POLL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) static int pl011_hwinit(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	/* Optionally enable pins to be muxed in and configured */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	pinctrl_pm_select_default_state(port->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	 * Try to enable the clock producer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	retval = clk_prepare_enable(uap->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	uap->port.uartclk = clk_get_rate(uap->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	/* Clear pending error and receive interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		    uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	 * Save the interrupt enable mask, and enable RX interrupts in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	 * the interrupt is used for NMI entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	uap->im = pl011_read(uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (dev_get_platdata(uap->port.dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		struct amba_pl011_data *plat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		plat = dev_get_platdata(uap->port.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		if (plat->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			plat->init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
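^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  * Some implementations (the ST variant, for instance) provide LCRH as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  * separate RX and TX registers; when the two offsets differ, both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  * copies have to be kept in sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  */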
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static bool pl011_split_lcrh(const struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	       pl011_reg_to_offset(uap, REG_LCRH_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	pl011_write(lcr_h, uap, REG_LCRH_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	if (pl011_split_lcrh(uap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		 * Wait 10 PCLKs before writing the LCRH_TX register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		 * to get this delay, write to a read-only register 10 times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		for (i = 0; i < 10; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			pl011_write(0xff, uap, REG_MIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		pl011_write(lcr_h, uap, REG_LCRH_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) static int pl011_allocate_irq(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) {
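^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	 * Restore the mask saved by pl011_hwinit() before the (possibly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	 * shared) IRQ line can fire.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	 */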
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)  * Enable interrupts; only the RX timeout interrupt when using DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)  * If the initial RX DMA job failed, start in interrupt mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)  * as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) static void pl011_enable_interrupts(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	spin_lock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	/* Clear out any spuriously appearing RX interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	 * RXIS is asserted only when the RX FIFO transitions from below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	 * to above the trigger threshold.  If the RX FIFO is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	 * full to the threshold this can't happen and RXIS will now be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	 * stuck off.  Drain the RX FIFO explicitly to fix this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	 */
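^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	 * The drain below is bounded at twice the FIFO depth so that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	 * terminates even if characters keep arriving while we read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	 */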
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	for (i = 0; i < uap->fifosize * 2; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		pl011_read(uap, REG_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
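^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	 * Always take the receive-timeout interrupt; take the RX interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	 * only when DMA is not handling reception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	 */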
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	uap->im = UART011_RTIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	if (!pl011_dma_rx_running(uap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		uap->im |= UART011_RXIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	spin_unlock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static int pl011_startup(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	unsigned int cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	retval = pl011_hwinit(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		goto clk_dis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	retval = pl011_allocate_irq(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		goto clk_dis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	pl011_write(uap->vendor->ifls, uap, REG_IFLS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	spin_lock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	/* restore RTS and DTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	pl011_write(cr, uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	spin_unlock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	 * initialise the old status of the modem signals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	/* Startup DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	pl011_dma_startup(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	pl011_enable_interrupts(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  clk_dis:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	clk_disable_unprepare(uap->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static int sbsa_uart_startup(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	retval = pl011_hwinit(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	retval = pl011_allocate_irq(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	/* The SBSA UART does not support any modem status lines. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	uap->old_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	pl011_enable_interrupts(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) static void pl011_shutdown_channel(struct uart_amba_port *uap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 					unsigned int lcrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	val = pl011_read(uap, lcrh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	pl011_write(val, uap, lcrh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)  * Disable the port. It should not disable RTS and DTR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)  * The RTS and DTR state should also be preserved so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)  * it can be restored during startup().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) static void pl011_disable_uart(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	unsigned int cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	spin_lock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	cr = pl011_read(uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	uap->old_cr = cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	cr &= UART011_CR_RTS | UART011_CR_DTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	pl011_write(cr, uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	spin_unlock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	 * Disable the break condition and the FIFOs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	pl011_shutdown_channel(uap, REG_LCRH_RX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	if (pl011_split_lcrh(uap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		pl011_shutdown_channel(uap, REG_LCRH_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) static void pl011_disable_interrupts(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	spin_lock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	/* mask all interrupts and clear all pending ones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	uap->im = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	pl011_write(uap->im, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	pl011_write(0xffff, uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	spin_unlock_irq(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) static void pl011_shutdown(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	pl011_disable_interrupts(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	pl011_dma_shutdown(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	free_irq(uap->port.irq, uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	pl011_disable_uart(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	 * Shut down the clock producer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	clk_disable_unprepare(uap->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	/* Optionally let pins go into sleep states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	pinctrl_pm_select_sleep_state(port->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if (dev_get_platdata(uap->port.dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		struct amba_pl011_data *plat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		plat = dev_get_platdata(uap->port.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		if (plat->exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			plat->exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	if (uap->port.ops->flush_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		uap->port.ops->flush_buffer(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) static void sbsa_uart_shutdown(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	pl011_disable_interrupts(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	free_irq(uap->port.irq, uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	if (uap->port.ops->flush_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		uap->port.ops->flush_buffer(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
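^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	 * The low byte (255) is the received character itself; bits 8..11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	 * of DR carry the FE/PE/BE/OE error flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	 */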
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	port->read_status_mask = UART011_DR_OE | 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	if (termios->c_iflag & INPCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		port->read_status_mask |= UART011_DR_BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	 * Characters to ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	port->ignore_status_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	if (termios->c_iflag & IGNBRK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		port->ignore_status_mask |= UART011_DR_BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		 * If we're ignoring parity and break indicators,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		 * ignore overruns too (for real raw support).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			port->ignore_status_mask |= UART011_DR_OE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	 * Ignore all characters if CREAD is not set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	if ((termios->c_cflag & CREAD) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		port->ignore_status_mask |= UART_DUMMY_DR_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) pl011_set_termios(struct uart_port *port, struct ktermios *termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		     struct ktermios *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	unsigned int lcr_h, old_cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	unsigned int baud, quot, clkdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	if (uap->vendor->oversampling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		clkdiv = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		clkdiv = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	 * Ask the core to calculate the divisor for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	baud = uart_get_baud_rate(port, termios, old, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 				  port->uartclk / clkdiv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) #ifdef CONFIG_DMA_ENGINE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	 * Adjust the RX DMA polling rate with the baud rate if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	 * specified: 10000000 / baud is roughly the time, in ms, needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	 * to receive 1000 characters at 10 bits per character.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	if (uap->dmarx.auto_poll_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
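^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	 * quot is the baud divisor in fixed point with 6 fractional bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	 * 64 * uartclk / (8 * baud) when oversampling by 8 (baud above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	 * uartclk/16), 64 * uartclk / (16 * baud) otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	 */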
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	if (baud > port->uartclk / 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	switch (termios->c_cflag & CSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	case CS5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		lcr_h = UART01x_LCRH_WLEN_5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	case CS6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		lcr_h = UART01x_LCRH_WLEN_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	case CS7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		lcr_h = UART01x_LCRH_WLEN_7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	default: // CS8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		lcr_h = UART01x_LCRH_WLEN_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	if (termios->c_cflag & CSTOPB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		lcr_h |= UART01x_LCRH_STP2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	if (termios->c_cflag & PARENB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		lcr_h |= UART01x_LCRH_PEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		if (!(termios->c_cflag & PARODD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 			lcr_h |= UART01x_LCRH_EPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		if (termios->c_cflag & CMSPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 			lcr_h |= UART011_LCRH_SPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	if (uap->fifosize > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		lcr_h |= UART01x_LCRH_FEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	 * Update the per-port timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	uart_update_timeout(port, termios->c_cflag, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	pl011_setup_status_masks(port, termios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	if (UART_ENABLE_MS(port, termios->c_cflag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		pl011_enable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	/* first, disable everything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	old_cr = pl011_read(uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	pl011_write(0, uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	if (termios->c_cflag & CRTSCTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		if (old_cr & UART011_CR_RTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			old_cr |= UART011_CR_RTSEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		old_cr |= UART011_CR_CTSEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	if (uap->vendor->oversampling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		if (baud > port->uartclk / 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 			old_cr |= ST_UART011_CR_OVSFACT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 			old_cr &= ~ST_UART011_CR_OVSFACT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	 * Workaround for the ST Micro oversampling variants: increase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	 * the bitrate slightly, by lowering the divisor, to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	 * delayed sampling of the start bit at high speeds; otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	 * we see data corruption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	if (uap->vendor->oversampling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 			quot -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		else if ((baud > 3250000) && (quot > 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 			quot -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	/* Set baud rate: quot has 6 fractional bits; FBRD takes the low 6, IBRD the rest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	pl011_write(quot & 0x3f, uap, REG_FBRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	pl011_write(quot >> 6, uap, REG_IBRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	 * ----------v----------v----------v----------v-----
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	 * REG_FBRD & REG_IBRD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	 * ----------^----------^----------^----------^-----
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	pl011_write_lcr_h(uap, lcr_h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	pl011_write(old_cr, uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		      struct ktermios *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	/* The SBSA UART only supports 8n1 without hardware flow control. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	termios->c_cflag |= CS8 | CLOCAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	uart_update_timeout(port, CS8, uap->fixed_baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	pl011_setup_status_masks(port, termios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) static const char *pl011_type(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	return uap->port.type == PORT_AMBA ? uap->type : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)  * Configure/autoconfigure the port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static void pl011_config_port(struct uart_port *port, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	if (flags & UART_CONFIG_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		port->type = PORT_AMBA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)  * verify the new serial_struct (for TIOCSSERIAL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	if (ser->irq < 0 || ser->irq >= nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	if (ser->baud_base < 9600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	if (port->mapbase != (unsigned long) ser->iomem_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) static const struct uart_ops amba_pl011_pops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	.tx_empty	= pl011_tx_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	.set_mctrl	= pl011_set_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	.get_mctrl	= pl011_get_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	.stop_tx	= pl011_stop_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	.start_tx	= pl011_start_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	.stop_rx	= pl011_stop_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	.enable_ms	= pl011_enable_ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	.break_ctl	= pl011_break_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	.startup	= pl011_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	.shutdown	= pl011_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	.flush_buffer	= pl011_dma_flush_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	.set_termios	= pl011_set_termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	.type		= pl011_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	.config_port	= pl011_config_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	.verify_port	= pl011_verify_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) #ifdef CONFIG_CONSOLE_POLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	.poll_init     = pl011_hwinit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	.poll_get_char = pl011_get_poll_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	.poll_put_char = pl011_put_poll_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
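^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /* The SBSA UART has no modem control lines, so these are no-ops. */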
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) static const struct uart_ops sbsa_uart_pops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	.tx_empty	= pl011_tx_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	.set_mctrl	= sbsa_uart_set_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	.get_mctrl	= sbsa_uart_get_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	.stop_tx	= pl011_stop_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	.start_tx	= pl011_start_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	.stop_rx	= pl011_stop_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	.startup	= sbsa_uart_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	.shutdown	= sbsa_uart_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	.set_termios	= sbsa_uart_set_termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	.type		= pl011_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	.config_port	= pl011_config_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	.verify_port	= pl011_verify_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) #ifdef CONFIG_CONSOLE_POLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	.poll_init     = pl011_hwinit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	.poll_get_char = pl011_get_poll_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	.poll_put_char = pl011_put_poll_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) static struct uart_amba_port *amba_ports[UART_NR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) static void pl011_console_putchar(struct uart_port *port, int ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	struct uart_amba_port *uap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	    container_of(port, struct uart_amba_port, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	pl011_write(ch, uap, REG_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) pl011_console_write(struct console *co, const char *s, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	struct uart_amba_port *uap = amba_ports[co->index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	unsigned int old_cr = 0, new_cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	int locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	clk_enable(uap->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 
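^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	 * Skip the port lock under sysrq and only trylock it during an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	 * oops, so that a console write from the crashing context cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	 * deadlock on a lock it may already hold.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	 */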
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	if (uap->port.sysrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	else if (oops_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		locked = spin_trylock(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		spin_lock(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	 *	First save the CR, then disable the interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	if (!uap->vendor->always_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		old_cr = pl011_read(uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		new_cr = old_cr & ~UART011_CR_CTSEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		pl011_write(new_cr, uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	uart_console_write(&uap->port, s, count, pl011_console_putchar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	 *	Finally, wait for transmitter to become empty and restore the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	 *	CR. Allow feature register bits to be inverted to work around
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	 *	errata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 						& uap->vendor->fr_busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	if (!uap->vendor->always_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		pl011_write(old_cr, uap, REG_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	if (locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		spin_unlock(&uap->port.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	clk_disable(uap->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 				      int *parity, int *bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		unsigned int lcr_h, ibrd, fbrd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		lcr_h = pl011_read(uap, REG_LCRH_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		*parity = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		if (lcr_h & UART01x_LCRH_PEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 			if (lcr_h & UART01x_LCRH_EPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 				*parity = 'e';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 				*parity = 'o';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			*bits = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 			*bits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		ibrd = pl011_read(uap, REG_IBRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		fbrd = pl011_read(uap, REG_FBRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 
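^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		 * Invert the divisor: the hardware divides by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		 * 16 * (ibrd + fbrd / 64), hence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		 * baud = uartclk * 4 / (64 * ibrd + fbrd).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		 */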
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		if (uap->vendor->oversampling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 			if (pl011_read(uap, REG_CR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 				  & ST_UART011_CR_OVSFACT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 				*baud *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) static int pl011_console_setup(struct console *co, char *options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	struct uart_amba_port *uap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	int baud = 38400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	int bits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	int parity = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	int flow = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	 * Check whether an invalid uart number has been specified, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	 * if so, search for the first available port that does have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	 * console support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	if (co->index >= UART_NR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		co->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	uap = amba_ports[co->index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	if (!uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	/* Allow pins to be muxed in and configured */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	pinctrl_pm_select_default_state(uap->port.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	ret = clk_prepare(uap->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	if (dev_get_platdata(uap->port.dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		struct amba_pl011_data *plat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		plat = dev_get_platdata(uap->port.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		if (plat->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 			plat->init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	uap->port.uartclk = clk_get_rate(uap->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	if (uap->vendor->fixed_options) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		baud = uap->fixed_baud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		if (options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 			uart_parse_options(options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 					   &baud, &parity, &bits, &flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 			pl011_console_get_options(uap, &baud, &parity, &bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)  *	pl011_console_match - non-standard console matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  *	@co:	  registering console
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)  *	@name:	  name from console command line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)  *	@idx:	  index from console command line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)  *	@options: ptr to option string from console command line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)  *	Only attempts to match console command lines of the form:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)  *	    console=pl011,mmio|mmio32,<addr>[,<options>]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)  *	    console=pl011,0x<addr>[,<options>]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  *	This form is used to register an initial earlycon boot console and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)  *	replace it with the amba_console at pl011 driver init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)  *	Performs console setup for a match (as required by the interface).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)  *	If no <options> are specified, then assume the h/w is already set up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)  *	Returns 0 if console matches; otherwise non-zero to use default matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) static int pl011_console_match(struct console *co, char *name, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 			       char *options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	unsigned char iotype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	resource_size_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	 * have a distinct console name, so make sure we check for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	 * The actual implementation of the erratum occurs in the probe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	 * function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	if (uart_parse_earlycon(options, &iotype, &addr, &options))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	/* try to match the port specified on the command line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		struct uart_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		if (!amba_ports[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		port = &amba_ports[i]->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		if (port->mapbase != addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		co->index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		port->cons = co;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		return pl011_console_setup(co, options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) }
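
/*
 * Illustrative command line (editorial note; the address is a
 * hypothetical MMIO base, not taken from this source):
 *
 *	console=pl011,mmio32,0x9000000,115200n8
 *
 * registers an initial earlycon boot console at that address; once this
 * driver probes a port whose mapbase matches, pl011_console_match()
 * switches the console over to the corresponding ttyAMA port.
 */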
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) static struct uart_driver amba_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) static struct console amba_console = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	.name		= "ttyAMA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	.write		= pl011_console_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	.device		= uart_console_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	.setup		= pl011_console_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	.match		= pl011_console_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	.index		= -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	.data		= &amba_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) #define AMBA_CONSOLE	(&amba_console)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 
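/*
 * Editorial note on the erratum path below: unlike pl011_putc() it
 * always uses 32-bit readl()/writel() accessors, and it waits for the
 * TX FIFO to drain (UART011_FR_TXFE) after each character rather than
 * polling the BUSY flag - presumably because BUSY cannot be trusted on
 * affected parts (inferred from the code, not documented in this file).
 */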
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) static void qdf2400_e44_putc(struct uart_port *port, int c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	writel(c, port->membase + UART01x_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	struct earlycon_device *dev = con->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 
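/*
 * Generic early-console putc: honour the port's iotype (some PL011
 * integrations only tolerate 32-bit register accesses), then spin until
 * the BUSY flag clears so the character has fully left the shifter
 * before returning.
 */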
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) static void pl011_putc(struct uart_port *port, int c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	if (port->iotype == UPIO_MEM32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		writel(c, port->membase + UART01x_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		writeb(c, port->membase + UART01x_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) static void pl011_early_write(struct console *con, const char *s, unsigned int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	struct earlycon_device *dev = con->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	uart_console_write(&dev->port, s, n, pl011_putc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) #ifdef CONFIG_CONSOLE_POLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) static int pl011_getc(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		return NO_POLL_CHAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	if (port->iotype == UPIO_MEM32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		return readl(port->membase + UART01x_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		return readb(port->membase + UART01x_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) static int pl011_early_read(struct console *con, char *s, unsigned int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	struct earlycon_device *dev = con->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	int ch, num_read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	while (num_read < n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		ch = pl011_getc(&dev->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		if (ch == NO_POLL_CHAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		s[num_read++] = ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	return num_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) #define pl011_early_read NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) #endif
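
/*
 * Editorial note: the polled read hook above is what lets kgdb attach
 * over the early console ("kgdboc_earlycon" on the command line) before
 * the full tty driver is up; without CONFIG_CONSOLE_POLL the early
 * console is write-only.
 */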
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)  * On non-ACPI systems, earlycon is enabled by specifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)  * "earlycon=pl011,<address>" on the kernel command line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)  * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)  * by specifying only "earlycon" on the command line.  Because it requires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)  * SPCR, the console starts after ACPI is parsed, which is later than a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)  * traditional early console.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)  * To get the traditional early console that starts before ACPI is parsed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)  * specify the full "earlycon=pl011,<address>" option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) static int __init pl011_early_console_setup(struct earlycon_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 					    const char *opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	if (!device->port.membase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	device->con->write = pl011_early_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	device->con->read = pl011_early_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
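
/*
 * Illustrative device-tree fragment (editorial sketch; node names and
 * the address are hypothetical) showing how a bare "earlycon" boot
 * argument reaches the OF_EARLYCON_DECLARE() entries above via
 * /chosen/stdout-path:
 *
 *	chosen {
 *		stdout-path = "serial0:115200n8";
 *	};
 *	aliases {
 *		serial0 = &uart0;
 *	};
 *	uart0: serial@9000000 {
 *		compatible = "arm,pl011", "arm,primecell";
 *		reg = <0x0 0x9000000 0x0 0x1000>;
 *	};
 */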
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)  * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)  * Erratum 44, traditional earlycon can be enabled by specifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)  * "earlycon=qdf2400_e44,<address>".  Any options are ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)  * Alternatively, you can just specify "earlycon", and the early console
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)  * will be enabled with the information from the SPCR table.  In this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)  * case, the SPCR code will detect the need for the E44 work-around,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)  * and set the console name to "qdf2400_e44".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) qdf2400_e44_early_console_setup(struct earlycon_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 				const char *opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	if (!device->port.membase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	device->con->write = qdf2400_e44_early_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) #define AMBA_CONSOLE	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) static struct uart_driver amba_reg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	.owner			= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	.driver_name		= "ttyAMA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	.dev_name		= "ttyAMA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	.major			= SERIAL_AMBA_MAJOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	.minor			= SERIAL_AMBA_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	.nr			= UART_NR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	.cons			= AMBA_CONSOLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) static int pl011_probe_dt_alias(int index, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	static bool seen_dev_with_alias;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	static bool seen_dev_without_alias;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	int ret = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	if (!IS_ENABLED(CONFIG_OF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	ret = of_alias_get_id(np, "serial");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		seen_dev_without_alias = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		ret = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		seen_dev_with_alias = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 			dev_warn(dev, "requested serial port %d not available.\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 			ret = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	if (seen_dev_with_alias && seen_dev_without_alias)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
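
/*
 * Example (editorial; labels are hypothetical): with the aliases below
 * in the device tree, of_alias_get_id() pins the port at &uart1 to
 * ttyAMA1 no matter which device happens to probe first:
 *
 *	aliases {
 *		serial0 = &uart0;
 *		serial1 = &uart1;
 *	};
 */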
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) /* also unregisters the uart_driver when the last port is gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) static void pl011_unregister_port(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	bool busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		if (amba_ports[i] == uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 			amba_ports[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		else if (amba_ports[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 			busy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	pl011_dma_remove(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	if (!busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		uart_unregister_driver(&amba_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static int pl011_find_free_port(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		if (amba_ports[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 			    struct resource *mmiobase, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	base = devm_ioremap_resource(dev, mmiobase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	if (IS_ERR(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 		return PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	index = pl011_probe_dt_alias(index, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	uap->old_cr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	uap->port.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	uap->port.mapbase = mmiobase->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	uap->port.membase = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	uap->port.fifosize = uap->fifosize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	uap->port.flags = UPF_BOOT_AUTOCONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	uap->port.line = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	amba_ports[index] = uap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 
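/*
 * The uart_driver is registered lazily, on the first successful port
 * probe (amba_reg.state stays NULL until uart_register_driver() has
 * run), and pl011_unregister_port() drops it again when the last port
 * goes away - so no ttyAMA driver state lingers on systems without a
 * PL011.
 */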
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) static int pl011_register_port(struct uart_amba_port *uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	/* Ensure interrupts from this UART are masked and cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	pl011_write(0, uap, REG_IMSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	pl011_write(0xffff, uap, REG_ICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	if (!amba_reg.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 		ret = uart_register_driver(&amba_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 			dev_err(uap->port.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 				"Failed to register AMBA-PL011 driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 			for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 				if (amba_ports[i] == uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 					amba_ports[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	ret = uart_add_one_port(&amba_reg, &uap->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		pl011_unregister_port(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	struct uart_amba_port *uap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	struct vendor_data *vendor = id->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	int portnr, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	portnr = pl011_find_free_port();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	if (portnr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		return portnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 			   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	if (!uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	uap->clk = devm_clk_get(&dev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	if (IS_ERR(uap->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		return PTR_ERR(uap->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	uap->reg_offset = vendor->reg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	uap->vendor = vendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	uap->fifosize = vendor->get_fifosize(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	uap->port.irq = dev->irq[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	uap->port.ops = &amba_pl011_pops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	amba_set_drvdata(dev, uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	return pl011_register_port(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) static void pl011_remove(struct amba_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	struct uart_amba_port *uap = amba_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	uart_remove_one_port(&amba_reg, &uap->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	pl011_unregister_port(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) static int pl011_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	struct uart_amba_port *uap = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	if (!uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	return uart_suspend_port(&amba_reg, &uap->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) static int pl011_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	struct uart_amba_port *uap = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	if (!uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	return uart_resume_port(&amba_reg, &uap->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) static int sbsa_uart_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	struct uart_amba_port *uap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	struct resource *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	int portnr, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	int baudrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	 * Check the mandatory baud-rate parameter in the DT node early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	 * so that we can bail out with an error right away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	if (pdev->dev.of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		ret = of_property_read_u32(np, "current-speed", &baudrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		baudrate = 115200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	portnr = pl011_find_free_port();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	if (portnr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		return portnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 			   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	if (!uap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	ret = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	uap->port.irq	= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) #ifdef CONFIG_ACPI_SPCR_TABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	if (qdf2400_e44_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 		dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 		uap->vendor = &vendor_qdt_qdf2400_e44;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		uap->vendor = &vendor_sbsa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	uap->reg_offset	= uap->vendor->reg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	uap->fifosize	= 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	uap->port.ops	= &sbsa_uart_pops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	uap->fixed_baud = baudrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	snprintf(uap->type, sizeof(uap->type), "SBSA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	platform_set_drvdata(pdev, uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	return pl011_register_port(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) }
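
/*
 * Illustrative SBSA UART node (editorial sketch; the address, interrupt
 * and speed are hypothetical).  "current-speed" is mandatory because
 * the SBSA programming model exposes no baud-rate divisors, so the
 * driver can only report the rate firmware already set:
 *
 *	serial@80000000 {
 *		compatible = "arm,sbsa-uart";
 *		reg = <0x0 0x80000000 0x0 0x1000>;
 *		interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
 *		current-speed = <115200>;
 *	};
 */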
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) static int sbsa_uart_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	struct uart_amba_port *uap = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	uart_remove_one_port(&amba_reg, &uap->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	pl011_unregister_port(uap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) static const struct of_device_id sbsa_uart_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	{ .compatible = "arm,sbsa-uart", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) static const struct acpi_device_id sbsa_uart_acpi_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	{ "ARMH0011", 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	{ "ARMHB000", 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) static struct platform_driver arm_sbsa_uart_platform_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	.probe		= sbsa_uart_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	.remove		= sbsa_uart_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	.driver	= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		.name	= "sbsa-uart",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		.pm	= &pl011_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		.of_match_table = of_match_ptr(sbsa_uart_of_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) static const struct amba_id pl011_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		.id	= 0x00041011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		.mask	= 0x000fffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		.data	= &vendor_arm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 		.id	= 0x00380802,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		.mask	= 0x00ffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 		.data	= &vendor_st,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 		.id	= AMBA_LINUX_ID(0x00, 0x1, 0xffe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		.mask	= 0x00ffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		.data	= &vendor_zte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	{ 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) };
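
/*
 * Editorial note: an AMBA periphid decodes as part number [11:0],
 * JEP106 designer code [19:12], revision [23:20] and configuration
 * [31:24].  So the first entry matches ARM's (designer 0x41) part 0x011
 * with the revision nibble masked out, the ST entry pins part 0x802,
 * designer 0x80, at revision 3, and the ZTE entry appears to use a
 * Linux-assigned synthetic ID since the hardware reports no genuine
 * periphid.
 */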
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) MODULE_DEVICE_TABLE(amba, pl011_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) static struct amba_driver pl011_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	.drv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		.name	= "uart-pl011",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		.pm	= &pl011_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	.id_table	= pl011_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	.probe		= pl011_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	.remove		= pl011_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) static int __init pl011_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	pr_info("Serial: AMBA PL011 UART driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		pr_warn("could not register SBSA UART platform driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	return amba_driver_register(&pl011_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) static void __exit pl011_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	amba_driver_unregister(&pl011_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)  * While this can be a module, if built-in it's most likely the console,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)  * so let's leave module_exit() but move module_init() to an earlier place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) arch_initcall(pl011_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) module_exit(pl011_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) MODULE_DESCRIPTION("ARM AMBA serial port driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) MODULE_LICENSE("GPL");