// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Atmel AT91 Serial ports
 * Copyright (C) 2003 Rick Bronson
 *
 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * DMA support added by Chip Coldwell.
 */
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel_pdc.h>
#include <linux/uaccess.h>
#include <linux/platform_data/atmel.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/mm.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/ioctls.h>

#define PDC_BUFFER_SIZE		512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
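
/*
 * The receiver time-out counter in RTOR is clocked at the bit rate, so with
 * roughly 10 bit times per character (8N1 framing) a value of 3 * 10 amounts
 * to about three idle character times on the line.
 */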

/* The minimum number of data the FIFOs should be able to contain */
#define ATMEL_MIN_FIFO_SIZE	8
/*
 * These two offsets are subtracted from the RX FIFO size to define the RTS
 * high and low thresholds
 */
#define ATMEL_RTS_HIGH_OFFSET	16
#define ATMEL_RTS_LOW_OFFSET	20
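
/*
 * For example, assuming a 32-byte RX FIFO, the resulting thresholds would be
 * 32 - 16 = 16 characters (high) and 32 - 20 = 12 characters (low), giving a
 * small amount of hysteresis on the RTS line.
 */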

#include <linux/serial_core.h>

#include "serial_mctrl_gpio.h"
#include "atmel_serial.h"

static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);

#ifdef CONFIG_SERIAL_ATMEL_TTYAT

/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
 * should coexist with the 8250 driver, such as if we have an external 16C550
 * UART. */
#define SERIAL_ATMEL_MAJOR	204
#define MINOR_START		154
#define ATMEL_DEVICENAME	"ttyAT"

#else

/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
 * name, but it is legally reserved for the 8250 driver. */
#define SERIAL_ATMEL_MAJOR	TTY_MAJOR
#define MINOR_START		64
#define ATMEL_DEVICENAME	"ttyS"

#endif

#define ATMEL_ISR_PASS_LIMIT	256
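
/*
 * Upper bound on how many times the interrupt handler may loop over the
 * status register before bailing out. This is the usual serial-driver
 * pass-counter pattern: a flood of events cannot keep the CPU stuck in the
 * handler forever.
 */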

struct atmel_dma_buffer {
	unsigned char	*buf;
	dma_addr_t	dma_addr;
	unsigned int	dma_size;
	unsigned int	ofs;
};

struct atmel_uart_char {
	u16		status;
	u16		ch;
};

/*
 * Be careful, the real size of the ring buffer is
 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that the ring
 * buffer can contain up to 1024 characters in PIO mode and up to 4096
 * characters in DMA mode.
 */
#define ATMEL_SERIAL_RINGSIZE 1024
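
/*
 * Put differently: one entry is two u16 fields (4 bytes), so the PIO ring
 * occupies 4 KiB of memory; DMA mode treats the same allocation as a plain
 * byte buffer, which is where the 4096-character figure comes from.
 */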

/*
 * at91: 6 USARTs and one DBGU port (SAM9260)
 * samx7: 3 USARTs and 5 UARTs
 */
#define ATMEL_MAX_UART		8

/*
 * We wrap our port structure around the generic uart_port.
 */
struct atmel_uart_port {
	struct uart_port	uart;		/* uart */
	struct clk		*clk;		/* uart clock */
	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
	u32			backup_imr;	/* IMR saved during suspend */
	int			break_active;	/* break being received */

	bool			use_dma_rx;	/* enable DMA receiver */
	bool			use_pdc_rx;	/* enable PDC receiver */
	short			pdc_rx_idx;	/* current PDC RX buffer */
	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */

	bool			use_dma_tx;	/* enable DMA transmitter */
	bool			use_pdc_tx;	/* enable PDC transmitter */
	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */

	spinlock_t		lock_tx;	/* port lock */
	spinlock_t		lock_rx;	/* port lock */
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx;
	dma_cookie_t		cookie_tx;
	dma_cookie_t		cookie_rx;
	struct scatterlist	sg_tx;
	struct scatterlist	sg_rx;
	struct tasklet_struct	tasklet_rx;
	struct tasklet_struct	tasklet_tx;
	atomic_t		tasklet_shutdown;
	unsigned int		irq_status_prev;
	unsigned int		tx_len;

	struct circ_buf		rx_ring;

	struct mctrl_gpios	*gpios;
	u32			backup_mode;	/* MR saved during iso7816 operations */
	u32			backup_brgr;	/* BRGR saved during iso7816 operations */
	unsigned int		tx_done_mask;
	u32			fifo_size;
	u32			rts_high;
	u32			rts_low;
	bool			ms_irq_enabled;
	u32			rtor;	/* address of receiver timeout register if it exists */
	bool			has_frac_baudrate;
	bool			has_hw_timer;
	struct timer_list	uart_timer;

	bool			tx_stopped;
	bool			suspended;
	unsigned int		pending;
	unsigned int		pending_status;
	spinlock_t		lock_suspended;

	bool			hd_start_rx;	/* can start RX during half-duplex operation */

	/* ISO7816 */
	unsigned int		fidi_min;
	unsigned int		fidi_max;

#ifdef CONFIG_PM
	struct {
		u32		cr;
		u32		mr;
		u32		imr;
		u32		brgr;
		u32		rtor;
		u32		ttgr;
		u32		fmr;
		u32		fimr;
	} cache;
#endif

	int (*prepare_rx)(struct uart_port *port);
	int (*prepare_tx)(struct uart_port *port);
	void (*schedule_rx)(struct uart_port *port);
	void (*schedule_tx)(struct uart_port *port);
	void (*release_rx)(struct uart_port *port);
	void (*release_tx)(struct uart_port *port);
};

static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);

#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-usart-serial" },
	{ /* sentinel */ }
};
#endif

static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	return container_of(uart, struct atmel_uart_port, uart);
}

static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
{
	return __raw_readl(port->membase + reg);
}

static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
{
	__raw_writel(value, port->membase + reg);
}

static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readb(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writeb(value, port->membase + ATMEL_US_THR);
}

static inline int atmel_uart_is_half_duplex(struct uart_port *port)
{
	return ((port->rs485.flags & SER_RS485_ENABLED) &&
		!(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
		(port->iso7816.flags & SER_ISO7816_ENABLED);
}
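
/*
 * "Half duplex" here means the receiver must be kept off while transmitting:
 * either RS485 without SER_RS485_RX_DURING_TX, or ISO7816, where the card
 * and the USART share a single bidirectional I/O line.
 */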

#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_pdc_rx;
}

static bool atmel_use_pdc_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_pdc_tx;
}
#else
static bool atmel_use_pdc_rx(struct uart_port *port)
{
	return false;
}

static bool atmel_use_pdc_tx(struct uart_port *port)
{
	return false;
}
#endif

static bool atmel_use_dma_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_dma_tx;
}

static bool atmel_use_dma_rx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_dma_rx;
}

static bool atmel_use_fifo(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->fifo_size;
}

static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
				   struct tasklet_struct *t)
{
	if (!atomic_read(&atmel_port->tasklet_shutdown))
		tasklet_schedule(t);
}
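
/*
 * tasklet_shutdown is meant to be raised before the tasklets are killed at
 * shutdown time, so a late interrupt cannot re-schedule a tasklet that is
 * being torn down.
 */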

/* Enable or disable the rs485 support */
static int atmel_config_rs485(struct uart_port *port,
			      struct serial_rs485 *rs485conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int mode;

	/* Disable interrupts */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	mode = atmel_uart_readl(port, ATMEL_US_MR);

	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;

	port->rs485 = *rs485conf;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		if (port->rs485.flags & SER_RS485_RX_DURING_TX)
			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
		else
			atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;

		atmel_uart_writel(port, ATMEL_US_TTGR,
				  rs485conf->delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
		if (atmel_use_pdc_tx(port))
			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
				ATMEL_US_TXBUFE;
		else
			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}
	atmel_uart_writel(port, ATMEL_US_MR, mode);

	/* Enable interrupts */
	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

	return 0;
}

static unsigned int atmel_calc_cd(struct uart_port *port,
				  struct serial_iso7816 *iso7816conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int cd;
	u64 mck_rate;

	mck_rate = (u64)clk_get_rate(atmel_port->clk);
	do_div(mck_rate, iso7816conf->clk);
	cd = mck_rate;
	return cd;
}
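
/*
 * CD is just the peripheral clock divided by the requested ISO7816 clock.
 * As a rough, made-up example: a 132 MHz MCK and a 3.5712 MHz card clock
 * would give CD = 132000000 / 3571200 ~= 36 (the division truncates).
 */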

static unsigned int atmel_calc_fidi(struct uart_port *port,
				    struct serial_iso7816 *iso7816conf)
{
	u64 fidi = 0;

	if (iso7816conf->sc_fi && iso7816conf->sc_di) {
		fidi = (u64)iso7816conf->sc_fi;
		do_div(fidi, iso7816conf->sc_di);
	}
	return (u32)fidi;
}
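
/*
 * For instance, the ISO 7816-3 default parameters Fi = 372 and Di = 1 give a
 * ratio of 372, which matches the 0x174 reset value written back to FIDI when
 * the port is switched back to RS232 below.
 */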

/* Enable or disable the iso7816 support */
/* Called with interrupts disabled */
static int atmel_config_iso7816(struct uart_port *port,
				struct serial_iso7816 *iso7816conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int mode;
	unsigned int cd, fidi;
	int ret = 0;

	/* Disable interrupts */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	mode = atmel_uart_readl(port, ATMEL_US_MR);

	if (iso7816conf->flags & SER_ISO7816_ENABLED) {
		mode &= ~ATMEL_US_USMODE;

		if (iso7816conf->tg > 255) {
			dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n");
			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
			ret = -EINVAL;
			goto err_out;
		}

		if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
		    == SER_ISO7816_T(0)) {
			mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK;
		} else if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
			   == SER_ISO7816_T(1)) {
			mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK;
		} else {
			dev_err(port->dev, "ISO7816: Type not supported\n");
			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
			ret = -EINVAL;
			goto err_out;
		}

		mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR);

		/* select mck clock, and output */
		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
		/* set parity for normal/inverse mode + max iterations */
		mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3);

		cd = atmel_calc_cd(port, iso7816conf);
		fidi = atmel_calc_fidi(port, iso7816conf);
		if (fidi == 0) {
			dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n");
		} else if (fidi < atmel_port->fidi_min
			   || fidi > atmel_port->fidi_max) {
			dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi);
			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
			ret = -EINVAL;
			goto err_out;
		}

		if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) {
			/* port not yet in iso7816 mode: store configuration */
			atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR);
			atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
		}

		atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg);
		atmel_uart_writel(port, ATMEL_US_BRGR, cd);
		atmel_uart_writel(port, ATMEL_US_FIDI, fidi);

		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN);
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION;
	} else {
		dev_dbg(port->dev, "Setting UART back to RS232\n");
		/* back to last RS232 settings */
		mode = atmel_port->backup_mode;
		memset(iso7816conf, 0, sizeof(struct serial_iso7816));
		atmel_uart_writel(port, ATMEL_US_TTGR, 0);
		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr);
		atmel_uart_writel(port, ATMEL_US_FIDI, 0x174);

		if (atmel_use_pdc_tx(port))
			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
						   ATMEL_US_TXBUFE;
		else
			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}

	port->iso7816 = *iso7816conf;

	atmel_uart_writel(port, ATMEL_US_MR, mode);

err_out:
	/* Enable interrupts */
	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

	return ret;
}

/*
 * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
 */
static u_int atmel_tx_empty(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_port->tx_stopped)
		return TIOCSER_TEMT;
	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
		TIOCSER_TEMT :
		0;
}

/*
 * Set state of the modem control output lines
 */
static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
	unsigned int control = 0;
	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
	unsigned int rts_paused, rts_ready;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* override mode to RS485 if needed, otherwise keep the current mode */
	if (port->rs485.flags & SER_RS485_ENABLED) {
		atmel_uart_writel(port, ATMEL_US_TTGR,
				  port->rs485.delay_rts_after_send);
		mode &= ~ATMEL_US_USMODE;
		mode |= ATMEL_US_USMODE_RS485;
	}

	/* set the RTS line state according to the mode */
	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
		/* force RTS line to high level */
		rts_paused = ATMEL_US_RTSEN;

		/* give the control of the RTS line back to the hardware */
		rts_ready = ATMEL_US_RTSDIS;
	} else {
		/* force RTS line to high level */
		rts_paused = ATMEL_US_RTSDIS;

		/* force RTS line to low level */
		rts_ready = ATMEL_US_RTSEN;
	}

	if (mctrl & TIOCM_RTS)
		control |= rts_ready;
	else
		control |= rts_paused;

	if (mctrl & TIOCM_DTR)
		control |= ATMEL_US_DTREN;
	else
		control |= ATMEL_US_DTRDIS;

	atmel_uart_writel(port, ATMEL_US_CR, control);

	mctrl_gpio_set(atmel_port->gpios, mctrl);

	/* Local loopback mode? */
	mode &= ~ATMEL_US_CHMODE;
	if (mctrl & TIOCM_LOOP)
		mode |= ATMEL_US_CHMODE_LOC_LOOP;
	else
		mode |= ATMEL_US_CHMODE_NORMAL;

	atmel_uart_writel(port, ATMEL_US_MR, mode);
}

/*
 * Get state of the modem control input lines
 */
static u_int atmel_get_mctrl(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int ret = 0, status;

	status = atmel_uart_readl(port, ATMEL_US_CSR);

	/*
	 * The control signals are active low.
	 */
	if (!(status & ATMEL_US_DCD))
		ret |= TIOCM_CD;
	if (!(status & ATMEL_US_CTS))
		ret |= TIOCM_CTS;
	if (!(status & ATMEL_US_DSR))
		ret |= TIOCM_DSR;
	if (!(status & ATMEL_US_RI))
		ret |= TIOCM_RI;

	return mctrl_gpio_get(atmel_port->gpios, &ret);
}

/*
 * Stop transmitting.
 */
static void atmel_stop_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_tx(port)) {
		/* disable PDC transmit */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
	}

	/*
	 * Disable the transmitter.
	 * This is mandatory when DMA is used, otherwise the DMA buffer
	 * would still be transmitted in full.
	 */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
	atmel_port->tx_stopped = true;

	/* Disable interrupts */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	if (atmel_uart_is_half_duplex(port))
		if (!atomic_read(&atmel_port->tasklet_shutdown))
			atmel_start_rx(port);

}

/*
 * Start transmitting.
 */
static void atmel_start_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
				       & ATMEL_PDC_TXTEN))
		/* The transmitter is already running. Yes, we
		   really need this. */
		return;

	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
		if (atmel_uart_is_half_duplex(port))
			atmel_stop_rx(port);

	if (atmel_use_pdc_tx(port))
		/* re-enable PDC transmit */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);

	/* Enable interrupts */
	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

	/* re-enable the transmitter */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
	atmel_port->tx_stopped = false;
}

/*
 * Start receiving - port is in the process of being opened.
 */
static void atmel_start_rx(struct uart_port *port)
{
	/* reset status and receiver */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);

	if (atmel_use_pdc_rx(port)) {
		/* enable PDC controller */
		atmel_uart_writel(port, ATMEL_US_IER,
				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
				  port->read_status_mask);
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	} else {
		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
	}
}

/*
 * Stop receiving - port is in the process of being closed.
 */
static void atmel_stop_rx(struct uart_port *port)
{
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);

	if (atmel_use_pdc_rx(port)) {
		/* disable PDC receive */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
		atmel_uart_writel(port, ATMEL_US_IDR,
				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
				  port->read_status_mask);
	} else {
		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
	}
}

/*
 * Enable modem status interrupts
 */
static void atmel_enable_ms(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	uint32_t ier = 0;

	/*
	 * Interrupt should not be enabled twice
	 */
	if (atmel_port->ms_irq_enabled)
		return;

	atmel_port->ms_irq_enabled = true;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
		ier |= ATMEL_US_CTSIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
		ier |= ATMEL_US_DSRIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
		ier |= ATMEL_US_RIIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
		ier |= ATMEL_US_DCDIC;

	atmel_uart_writel(port, ATMEL_US_IER, ier);

	mctrl_gpio_enable_ms(atmel_port->gpios);
}
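
/*
 * For each modem-status signal, the USART interrupt is only enabled when no
 * GPIO has been assigned to that signal; GPIO-backed lines get their change
 * notifications through mctrl_gpio_enable_ms() instead.
 */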

/*
 * Disable modem status interrupts
 */
static void atmel_disable_ms(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	uint32_t idr = 0;

	/*
	 * Interrupt should not be disabled twice
	 */
	if (!atmel_port->ms_irq_enabled)
		return;

	atmel_port->ms_irq_enabled = false;

	mctrl_gpio_disable_ms(atmel_port->gpios);

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
		idr |= ATMEL_US_CTSIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
		idr |= ATMEL_US_DSRIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
		idr |= ATMEL_US_RIIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
		idr |= ATMEL_US_DCDIC;

	atmel_uart_writel(port, ATMEL_US_IDR, idr);
}

/*
 * Control the transmission of a break signal
 */
static void atmel_break_ctl(struct uart_port *port, int break_state)
{
	if (break_state != 0)
		/* start break */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
	else
		/* stop break */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
}

/*
 * Stores the incoming character in the ring buffer
 */
static void
atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
		     unsigned int ch)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	struct atmel_uart_char *c;

	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
		/* Buffer overflow, ignore char */
		return;

	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
	c->status = status;
	c->ch = ch;

	/* Make sure the character is stored before we update head. */
	smp_wmb();

	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
}
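
/*
 * The ring is a single-producer/single-consumer buffer: the interrupt path
 * only advances head and the rx tasklet only advances tail, so no lock is
 * needed. The smp_wmb() above is expected to pair with a read barrier on the
 * consumer side.
 */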

/*
 * Deal with parity, framing and overrun errors.
 */
static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
{
	/* clear error */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	if (status & ATMEL_US_RXBRK) {
		/* ignore side-effect */
		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
		port->icount.brk++;
	}
	if (status & ATMEL_US_PARE)
		port->icount.parity++;
	if (status & ATMEL_US_FRAME)
		port->icount.frame++;
	if (status & ATMEL_US_OVRE)
		port->icount.overrun++;
}

/*
 * Characters received (called from interrupt handler)
 */
static void atmel_rx_chars(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, ch;

	status = atmel_uart_readl(port, ATMEL_US_CSR);
	while (status & ATMEL_US_RXRDY) {
		ch = atmel_uart_read_char(port);

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
			     || atmel_port->break_active)) {

			/* clear error */
			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

			if (status & ATMEL_US_RXBRK
			    && !atmel_port->break_active) {
				atmel_port->break_active = 1;
				atmel_uart_writel(port, ATMEL_US_IER,
						  ATMEL_US_RXBRK);
			} else {
				/*
				 * This is either the end-of-break
				 * condition or we've received at
				 * least one character without RXBRK
				 * being set. In both cases, the next
				 * RXBRK will indicate start-of-break.
				 */
				atmel_uart_writel(port, ATMEL_US_IDR,
						  ATMEL_US_RXBRK);
				status &= ~ATMEL_US_RXBRK;
				atmel_port->break_active = 0;
			}
		}

		atmel_buffer_rx_char(port, status, ch);
		status = atmel_uart_readl(port, ATMEL_US_CSR);
	}

	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * Transmit characters (called from tasklet with TXRDY interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static void atmel_tx_chars(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct circ_buf *xmit = &port->state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (port->x_char &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) atmel_uart_write_char(port, port->x_char);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) port->icount.tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) port->x_char = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (uart_circ_empty(xmit) || uart_tx_stopped(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) atmel_uart_write_char(port, xmit->buf[xmit->tail]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) port->icount.tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (uart_circ_empty(xmit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) uart_write_wakeup(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (!uart_circ_empty(xmit)) {
		/*
		 * We still have characters to transmit, so we should continue
		 * transmitting them when TX is ready, regardless of
		 * mode or duplexity.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) atmel_port->tx_done_mask |= ATMEL_US_TXRDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* Enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) atmel_uart_writel(port, ATMEL_US_IER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) atmel_port->tx_done_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (atmel_uart_is_half_duplex(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
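/*
 * DMA engine callback for TX: advance the xmit ring past the bytes that
 * were just sent, then either schedule the TX tasklet for the remaining
 * data or, in half-duplex mode, arrange the RS485 turnaround to RX.
 */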
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static void atmel_complete_tx_dma(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct atmel_uart_port *atmel_port = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct uart_port *port = &atmel_port->uart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct circ_buf *xmit = &port->state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct dma_chan *chan = atmel_port->chan_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) dmaengine_terminate_all(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) xmit->tail += atmel_port->tx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) xmit->tail &= UART_XMIT_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) port->icount.tx += atmel_port->tx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) spin_lock_irq(&atmel_port->lock_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) async_tx_ack(atmel_port->desc_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) atmel_port->cookie_tx = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) atmel_port->desc_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) spin_unlock_irq(&atmel_port->lock_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) uart_write_wakeup(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /*
	 * xmit is a circular buffer, so if we have just sent data from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * xmit->tail to the end of xmit->buf, now we have to transmit the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * remaining data from the beginning of xmit->buf to xmit->head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (!uart_circ_empty(xmit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) else if (atmel_uart_is_half_duplex(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * DMA done, re-enable TXEMPTY and signal that we can stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * TX and start RX for RS485
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) atmel_port->hd_start_rx = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) atmel_uart_writel(port, ATMEL_US_IER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) atmel_port->tx_done_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
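/*
 * Stop any ongoing TX DMA transfer, release the channel and unmap the
 * TX scatterlist.
 */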
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) static void atmel_release_tx_dma(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct dma_chan *chan = atmel_port->chan_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) dmaengine_terminate_all(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dma_release_channel(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) atmel_port->desc_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) atmel_port->chan_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) atmel_port->cookie_tx = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /*
 * Called from the tasklet with the TXRDY interrupt disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static void atmel_tx_dma(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct circ_buf *xmit = &port->state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct dma_chan *chan = atmel_port->chan_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) unsigned int tx_len, part1_len, part2_len, sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) dma_addr_t phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* Make sure we have an idle channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (atmel_port->desc_tx != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /*
		 * DMA is idle now.
		 * The port xmit buffer is already mapped and is exactly one
		 * page, so just adjust offsets and lengths. Since it is a
		 * circular buffer, we have to transmit up to the end of the
		 * buffer first, and then the rest. Take the port lock to get
		 * a consistent xmit buffer state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) tx_len = CIRC_CNT_TO_END(xmit->head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) xmit->tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) UART_XMIT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (atmel_port->fifo_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* multi data mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) part1_len = (tx_len & ~0x3); /* DWORD access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) part2_len = (tx_len & 0x3); /* BYTE access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* single data (legacy) mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) part1_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) part2_len = tx_len; /* BYTE access only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) sg_init_table(sgl, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) sg_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) phys_addr = sg_dma_address(sg_tx) + xmit->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (part1_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) sg = &sgl[sg_len++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) sg_dma_address(sg) = phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) sg_dma_len(sg) = part1_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) phys_addr += part1_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (part2_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) sg = &sgl[sg_len++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) sg_dma_address(sg) = phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) sg_dma_len(sg) = part2_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * save tx_len so atmel_complete_tx_dma() will increase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * xmit->tail correctly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) atmel_port->tx_len = tx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) desc = dmaengine_prep_slave_sg(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) DMA_MEM_TO_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) DMA_PREP_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) dev_err(port->dev, "Failed to send via dma!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) atmel_port->desc_tx = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) desc->callback = atmel_complete_tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) desc->callback_param = atmel_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) atmel_port->cookie_tx = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (dma_submit_error(atmel_port->cookie_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) dev_err(port->dev, "dma_submit_error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) atmel_port->cookie_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) dma_async_issue_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) uart_write_wakeup(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
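/*
 * Request the "tx" DMA channel, map the UART xmit buffer for DMA and
 * configure the slave transfer towards THR. On failure, fall back to
 * PIO by clearing use_dma_tx.
 */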
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static int atmel_prepare_tx_dma(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct device *mfd_dev = port->dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct dma_slave_config config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) int ret, nent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (atmel_port->chan_tx == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) goto chan_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) dev_info(port->dev, "using %s for tx DMA transfers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) dma_chan_name(atmel_port->chan_tx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) spin_lock_init(&atmel_port->lock_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) sg_init_table(&atmel_port->sg_tx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* UART circular tx buffer is an aligned page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) sg_set_page(&atmel_port->sg_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) virt_to_page(port->state->xmit.buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) UART_XMIT_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) offset_in_page(port->state->xmit.buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) nent = dma_map_sg(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) &atmel_port->sg_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (!nent) {
		dev_dbg(port->dev, "failed to map the TX DMA buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) goto chan_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) sg_dma_len(&atmel_port->sg_tx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) port->state->xmit.buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) &sg_dma_address(&atmel_port->sg_tx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /* Configure the slave DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) memset(&config, 0, sizeof(config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) config.direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) config.dst_addr_width = (atmel_port->fifo_size) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) DMA_SLAVE_BUSWIDTH_4_BYTES :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) config.dst_addr = port->mapbase + ATMEL_US_THR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) config.dst_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ret = dmaengine_slave_config(atmel_port->chan_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) &config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) dev_err(port->dev, "DMA tx slave configuration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) goto chan_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) chan_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) dev_err(port->dev, "TX channel not available, switch to pio\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) atmel_port->use_dma_tx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (atmel_port->chan_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) atmel_release_tx_dma(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
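/* Cyclic RX DMA callback: defer the actual processing to the RX tasklet. */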
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static void atmel_complete_rx_dma(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct uart_port *port = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
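/*
 * Stop the cyclic RX DMA transfer, release the channel and unmap the
 * RX scatterlist.
 */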
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static void atmel_release_rx_dma(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct dma_chan *chan = atmel_port->chan_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) dmaengine_terminate_all(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) dma_release_channel(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) atmel_port->desc_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) atmel_port->chan_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) atmel_port->cookie_rx = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
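/*
 * Drain the cyclic RX DMA ring buffer: push the data between ring->tail
 * and the position reported by the DMA engine to the tty layer, then
 * re-enable the receive timeout interrupt.
 */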
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static void atmel_rx_from_dma(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) struct tty_port *tport = &port->state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct circ_buf *ring = &atmel_port->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct dma_chan *chan = atmel_port->chan_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) enum dma_status dmastat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /* Reset the UART timeout early so that we don't miss one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) dmastat = dmaengine_tx_status(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) atmel_port->cookie_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) &state);
	/* Reschedule the tasklet if the DMA engine reports an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (dmastat == DMA_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) dev_dbg(port->dev, "Get residue error, restart tasklet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* CPU claims ownership of RX DMA buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) dma_sync_sg_for_cpu(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) &atmel_port->sg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * ring->head points to the end of data already written by the DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * ring->tail points to the beginning of data to be read by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * framework.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * The current transfer size should not be larger than the dma buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * At this point ring->head may point to the first byte right after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * last byte of the dma buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) *
	 * However, ring->tail must always point inside the dma buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * Since we use a ring buffer, we have to handle the case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * where head is lower than tail. In such a case, we first read from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * tail to the end of the buffer then reset tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (ring->head < ring->tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) tty_insert_flip_string(tport, ring->buf + ring->tail, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ring->tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) port->icount.rx += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* Finally we read data from tail to head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (ring->tail < ring->head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) count = ring->head - ring->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) tty_insert_flip_string(tport, ring->buf + ring->tail, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* Wrap ring->head if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ring->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ring->tail = ring->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) port->icount.rx += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
	/* USART retrieves ownership of RX DMA buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) dma_sync_sg_for_device(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) &atmel_port->sg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * Drop the lock here since it might end up calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * uart_start(), which takes the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) tty_flip_buffer_push(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
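/*
 * Request the "rx" DMA channel, map the RX ring buffer and start a
 * cyclic transfer from RHR into it. On failure, fall back to PIO by
 * clearing use_dma_rx.
 */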
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static int atmel_prepare_rx_dma(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct device *mfd_dev = port->dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct dma_slave_config config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct circ_buf *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int ret, nent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) ring = &atmel_port->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) dma_cap_set(DMA_CYCLIC, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (atmel_port->chan_rx == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) goto chan_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) dev_info(port->dev, "using %s for rx DMA transfers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) dma_chan_name(atmel_port->chan_rx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) spin_lock_init(&atmel_port->lock_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) sg_init_table(&atmel_port->sg_rx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* UART circular rx buffer is an aligned page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) BUG_ON(!PAGE_ALIGNED(ring->buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) sg_set_page(&atmel_port->sg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) virt_to_page(ring->buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) offset_in_page(ring->buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) nent = dma_map_sg(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) &atmel_port->sg_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (!nent) {
		dev_dbg(port->dev, "failed to map the RX DMA buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) goto chan_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) sg_dma_len(&atmel_port->sg_rx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ring->buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) &sg_dma_address(&atmel_port->sg_rx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /* Configure the slave DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) memset(&config, 0, sizeof(config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) config.direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) config.src_addr = port->mapbase + ATMEL_US_RHR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) config.src_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) ret = dmaengine_slave_config(atmel_port->chan_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) &config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) dev_err(port->dev, "DMA rx slave configuration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) goto chan_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /*
	 * Prepare a cyclic dma transfer split into two periods,
	 * each covering half of the ring buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) sg_dma_address(&atmel_port->sg_rx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) sg_dma_len(&atmel_port->sg_rx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) sg_dma_len(&atmel_port->sg_rx)/2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) DMA_DEV_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) dev_err(port->dev, "Preparing DMA cyclic failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) goto chan_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) desc->callback = atmel_complete_rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) desc->callback_param = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) atmel_port->desc_rx = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) atmel_port->cookie_rx = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (dma_submit_error(atmel_port->cookie_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) dev_err(port->dev, "dma_submit_error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) atmel_port->cookie_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) goto chan_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) dma_async_issue_pending(atmel_port->chan_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) chan_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) dev_err(port->dev, "RX channel not available, switch to pio\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) atmel_port->use_dma_rx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (atmel_port->chan_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) atmel_release_rx_dma(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
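/*
 * Polling timer: periodically schedule the RX tasklet and re-arm the
 * timer, unless the tasklets are being shut down.
 */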
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static void atmel_uart_timer_callback(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) uart_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) struct uart_port *port = &atmel_port->uart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (!atomic_read(&atmel_port->tasklet_shutdown)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) tasklet_schedule(&atmel_port->tasklet_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) mod_timer(&atmel_port->uart_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) jiffies + uart_poll_timeout(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * receive interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) atmel_handle_receive(struct uart_port *port, unsigned int pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (atmel_use_pdc_rx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * PDC receive. Just schedule the tasklet and let it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * figure out the details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * TODO: We're not handling error flags correctly at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * the moment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) atmel_uart_writel(port, ATMEL_US_IDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) atmel_tasklet_schedule(atmel_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) &atmel_port->tasklet_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ATMEL_US_FRAME | ATMEL_US_PARE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) atmel_pdc_rxerr(port, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (atmel_use_dma_rx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (pending & ATMEL_US_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) atmel_uart_writel(port, ATMEL_US_IDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ATMEL_US_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) atmel_tasklet_schedule(atmel_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) &atmel_port->tasklet_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) /* Interrupt receive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (pending & ATMEL_US_RXRDY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) atmel_rx_chars(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) else if (pending & ATMEL_US_RXBRK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * End of break detected. If it came along with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * character, atmel_rx_chars will handle it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) atmel_port->break_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) atmel_handle_transmit(struct uart_port *port, unsigned int pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (pending & atmel_port->tx_done_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) atmel_uart_writel(port, ATMEL_US_IDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) atmel_port->tx_done_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* Start RX if flag was set and FIFO is empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (atmel_port->hd_start_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (!(atmel_uart_readl(port, ATMEL_US_CSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) & ATMEL_US_TXEMPTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) atmel_port->hd_start_rx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) atmel_start_rx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * status flags interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) atmel_handle_status(struct uart_port *port, unsigned int pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) unsigned int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) unsigned int status_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) | ATMEL_US_CTSIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) status_change = status ^ atmel_port->irq_status_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) atmel_port->irq_status_prev = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) | ATMEL_US_DCD | ATMEL_US_CTS)) {
			/* TODO: Any read of CSR will clear these interrupts! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (status_change & ATMEL_US_RI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) port->icount.rng++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (status_change & ATMEL_US_DSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) port->icount.dsr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (status_change & ATMEL_US_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (status_change & ATMEL_US_CTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) wake_up_interruptible(&port->state->port.delta_msr_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * Interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static irqreturn_t atmel_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) struct uart_port *port = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) unsigned int status, pending, mask, pass_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) spin_lock(&atmel_port->lock_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) status = atmel_uart_readl(port, ATMEL_US_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) mask = atmel_uart_readl(port, ATMEL_US_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) pending = status & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (!pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (atmel_port->suspended) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) atmel_port->pending |= pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) atmel_port->pending_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) atmel_uart_writel(port, ATMEL_US_IDR, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) pm_system_wakeup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) atmel_handle_receive(port, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) atmel_handle_status(port, pending, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) atmel_handle_transmit(port, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) spin_unlock(&atmel_port->lock_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
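	/*
	 * pass_counter is only incremented once the handlers have run, so it
	 * is still zero if we bailed out early (nothing pending, or the port
	 * is suspended); in that case report the interrupt as not handled.
	 */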
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return pass_counter ? IRQ_HANDLED : IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
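/* Unmap the PDC TX buffer. */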
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static void atmel_release_tx_pdc(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) dma_unmap_single(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) pdc->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) pdc->dma_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static void atmel_tx_pdc(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct circ_buf *xmit = &port->state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
	/* PDC still busy with the previous transfer? Nothing to do yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (atmel_uart_readl(port, ATMEL_PDC_TCR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) xmit->tail += pdc->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) xmit->tail &= UART_XMIT_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) port->icount.tx += pdc->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) pdc->ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
	/* more to transmit - set up the next transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /* disable PDC transmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) dma_sync_single_for_device(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) pdc->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) pdc->dma_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) pdc->ofs = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) atmel_uart_writel(port, ATMEL_PDC_TPR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) pdc->dma_addr + xmit->tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) atmel_uart_writel(port, ATMEL_PDC_TCR, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* re-enable PDC transmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /* Enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) atmel_uart_writel(port, ATMEL_US_IER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) atmel_port->tx_done_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (atmel_uart_is_half_duplex(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /* DMA done, stop TX, start RX for RS485 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) atmel_start_rx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) uart_write_wakeup(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
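/* Map the UART xmit buffer so the PDC can fetch transmit data from it. */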
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) static int atmel_prepare_tx_pdc(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) struct circ_buf *xmit = &port->state->xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) pdc->buf = xmit->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) pdc->dma_addr = dma_map_single(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) pdc->buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) UART_XMIT_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) pdc->dma_size = UART_XMIT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) pdc->ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
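/*
 * Drain the software RX ring filled by the interrupt-driven receive path
 * and push the characters, with their error flags, to the tty layer.
 */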
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) static void atmel_rx_from_ring(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct circ_buf *ring = &atmel_port->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) unsigned int flg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) unsigned int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) while (ring->head != ring->tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct atmel_uart_char c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /* Make sure c is loaded after head. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) port->icount.rx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) status = c.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) flg = TTY_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * note that the error handling code is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * out of the main execution path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (status & ATMEL_US_RXBRK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /* ignore side-effect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) port->icount.brk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (uart_handle_break(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (status & ATMEL_US_PARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) port->icount.parity++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (status & ATMEL_US_FRAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) port->icount.frame++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (status & ATMEL_US_OVRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) port->icount.overrun++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) status &= port->read_status_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (status & ATMEL_US_RXBRK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) flg = TTY_BREAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) else if (status & ATMEL_US_PARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) flg = TTY_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) else if (status & ATMEL_US_FRAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) flg = TTY_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (uart_handle_sysrq_char(port, c.ch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * Drop the lock here since it might end up calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * uart_start(), which takes the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) tty_flip_buffer_push(&port->state->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
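/*
 * Unmap and free both PDC RX ping-pong buffers allocated by
 * atmel_prepare_rx_pdc().
 */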
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) static void atmel_release_rx_pdc(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) dma_unmap_single(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) pdc->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) pdc->dma_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) kfree(pdc->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
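/*
 * Drain data received through the PDC into the tty layer. Bytes completed in
 * the current ping-pong buffer are pushed to the tty; when a buffer fills up
 * it is handed back to the PDC as the next buffer and we switch to the other
 * one. The ENDRX and TIMEOUT interrupts are then re-enabled.
 */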
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) static void atmel_rx_from_pdc(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) struct tty_port *tport = &port->state->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) struct atmel_dma_buffer *pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) int rx_idx = atmel_port->pdc_rx_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) unsigned int head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) unsigned int tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /* Reset the UART timeout early so that we don't miss one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) pdc = &atmel_port->pdc_rx[rx_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) tail = pdc->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) /* If the PDC has switched buffers, RPR won't contain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * any address within the current buffer. Since head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * is unsigned, we just need a one-way comparison to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * find out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * In this case, we just need to consume the entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * buffer and resubmit it for DMA. This will clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * ENDRX bit as well, so that we can safely re-enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * all interrupts below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) head = min(head, pdc->dma_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (likely(head != tail)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) pdc->dma_size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * head will only wrap around when we recycle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * the DMA buffer, and when that happens, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * explicitly set tail to 0. So head will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * always be greater than tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) count = head - tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) dma_sync_single_for_device(port->dev, pdc->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) pdc->dma_size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) port->icount.rx += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) pdc->ofs = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * If the current buffer is full, we need to check if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * the next one contains any additional data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (head >= pdc->dma_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) pdc->ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) rx_idx = !rx_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) atmel_port->pdc_rx_idx = rx_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) } while (head >= pdc->dma_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * Drop the lock here since it might end up calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) * uart_start(), which takes the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) tty_flip_buffer_push(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) atmel_uart_writel(port, ATMEL_US_IER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
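/*
 * Allocate and DMA-map the two PDC RX ping-pong buffers, then program the
 * current (RPR/RCR) and next (RNPR/RNCR) pointer/counter registers. On
 * allocation failure the PDC path is disabled so the caller falls back to PIO.
 */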
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) static int atmel_prepare_rx_pdc(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (pdc->buf == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (i != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) dma_unmap_single(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) atmel_port->pdc_rx[0].dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) PDC_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) kfree(atmel_port->pdc_rx[0].buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) atmel_port->use_pdc_rx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) pdc->dma_addr = dma_map_single(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) pdc->buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) PDC_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) pdc->dma_size = PDC_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) pdc->ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) atmel_port->pdc_rx_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) atmel_uart_writel(port, ATMEL_PDC_RNPR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) atmel_port->pdc_rx[1].dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) * tasklet handling tty stuff outside the interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static void atmel_tasklet_rx_func(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) tasklet_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct uart_port *port = &atmel_port->uart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /* The interrupt handler does not take the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) atmel_port->schedule_rx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) static void atmel_tasklet_tx_func(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) tasklet_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct uart_port *port = &atmel_port->uart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /* The interrupt handler does not take the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) atmel_port->schedule_tx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
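/*
 * Pick the RX and TX transfer modes from the device tree: "atmel,use-dma-rx"
 * (resp. "atmel,use-dma-tx") combined with a "dmas" property selects the DMA
 * engine path, the same property without "dmas" selects the legacy PDC path,
 * and no property at all leaves the port in PIO mode.
 */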
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) static void atmel_init_property(struct atmel_uart_port *atmel_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) /* DMA/PDC usage specification */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (of_property_read_bool(np, "atmel,use-dma-rx")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (of_property_read_bool(np, "dmas")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) atmel_port->use_dma_rx = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) atmel_port->use_pdc_rx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) atmel_port->use_dma_rx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) atmel_port->use_pdc_rx = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) atmel_port->use_dma_rx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) atmel_port->use_pdc_rx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (of_property_read_bool(np, "atmel,use-dma-tx")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (of_property_read_bool(np, "dmas")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) atmel_port->use_dma_tx = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) atmel_port->use_pdc_tx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) atmel_port->use_dma_tx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) atmel_port->use_pdc_tx = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) atmel_port->use_dma_tx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) atmel_port->use_pdc_tx = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
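/*
 * Bind the prepare/schedule/release callbacks to the transfer mode chosen in
 * atmel_init_property(): DMA engine, PDC, or plain PIO (ring buffer on RX,
 * direct register writes on TX).
 */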
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) static void atmel_set_ops(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (atmel_use_dma_rx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) atmel_port->prepare_rx = &atmel_prepare_rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) atmel_port->schedule_rx = &atmel_rx_from_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) atmel_port->release_rx = &atmel_release_rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) } else if (atmel_use_pdc_rx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) atmel_port->schedule_rx = &atmel_rx_from_pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) atmel_port->release_rx = &atmel_release_rx_pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) atmel_port->prepare_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) atmel_port->schedule_rx = &atmel_rx_from_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) atmel_port->release_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (atmel_use_dma_tx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) atmel_port->prepare_tx = &atmel_prepare_tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) atmel_port->schedule_tx = &atmel_tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) atmel_port->release_tx = &atmel_release_tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) } else if (atmel_use_pdc_tx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) atmel_port->schedule_tx = &atmel_tx_pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) atmel_port->release_tx = &atmel_release_tx_pdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) atmel_port->prepare_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) atmel_port->schedule_tx = &atmel_tx_chars;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) atmel_port->release_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * Get the IP name (usart, uart or dbgu) and set the matching capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static void atmel_get_ip_name(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) int name = atmel_uart_readl(port, ATMEL_US_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) u32 version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) u32 usart, dbgu_uart, new_uart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) /* ASCII encoding of the IP name register values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) usart = 0x55534152; /* USAR(T) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) dbgu_uart = 0x44424755; /* DBGU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) new_uart = 0x55415254; /* UART */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * Only USART devices from the at91sam9260 SoC implement fractional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * baud rate. It is available for all asynchronous modes, with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * following restriction: the sampling clock's duty cycle is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * constant.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) atmel_port->has_frac_baudrate = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) atmel_port->has_hw_timer = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (name == new_uart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) dev_dbg(port->dev, "Uart with hw timer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) atmel_port->has_hw_timer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) atmel_port->rtor = ATMEL_UA_RTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) } else if (name == usart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) dev_dbg(port->dev, "Usart\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) atmel_port->has_frac_baudrate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) atmel_port->has_hw_timer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) atmel_port->rtor = ATMEL_US_RTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) version = atmel_uart_readl(port, ATMEL_US_VERSION);
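	/*
	 * The valid FI/DI ratio range (used when the port runs in ISO7816
	 * mode) depends on the USART revision reported above.
	 */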
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) switch (version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) case 0x814: /* sama5d2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) case 0x701: /* sama5d4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) atmel_port->fidi_min = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) atmel_port->fidi_max = 65535;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) case 0x502: /* sam9x5, sama5d3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) atmel_port->fidi_min = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) atmel_port->fidi_max = 2047;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) atmel_port->fidi_min = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) atmel_port->fidi_max = 2047;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) } else if (name == dbgu_uart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) /* fallback for older SoCs: use version field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) version = atmel_uart_readl(port, ATMEL_US_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) switch (version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) case 0x302:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) case 0x10213:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) case 0x10302:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) dev_dbg(port->dev, "This version is usart\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) atmel_port->has_frac_baudrate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) atmel_port->has_hw_timer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) atmel_port->rtor = ATMEL_US_RTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) case 0x203:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) case 0x10202:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) dev_dbg(port->dev, "This version is uart\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) dev_err(port->dev, "Unsupported ip name and version, defaulting to uart\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * Perform initialization and enable port for reception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) static int atmel_startup(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) struct platform_device *pdev = to_platform_device(port->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * Ensure that no interrupts are enabled otherwise when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * request_irq() is called we could get stuck trying to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * handle an unexpected interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) atmel_uart_writel(port, ATMEL_US_IDR, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) atmel_port->ms_irq_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * Allocate the IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) retval = request_irq(port->irq, atmel_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) IRQF_SHARED | IRQF_COND_SUSPEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) dev_name(&pdev->dev), port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) dev_err(port->dev, "atmel_startup - Can't get irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) atomic_set(&atmel_port->tasklet_shutdown, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) tasklet_setup(&atmel_port->tasklet_rx, atmel_tasklet_rx_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * Initialize DMA (if necessary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) atmel_init_property(atmel_port, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) atmel_set_ops(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
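	/*
	 * If setting up a DMA or PDC path fails, the prepare callback clears
	 * the corresponding use_dma/use_pdc flag, so the second call to
	 * atmel_set_ops() falls back to the PIO callbacks.
	 */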
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (atmel_port->prepare_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) retval = atmel_port->prepare_rx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) atmel_set_ops(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (atmel_port->prepare_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) retval = atmel_port->prepare_tx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) atmel_set_ops(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * Enable FIFO when available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (atmel_port->fifo_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) unsigned int txrdym = ATMEL_US_ONE_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) unsigned int rxrdym = ATMEL_US_ONE_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) unsigned int fmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) atmel_uart_writel(port, ATMEL_US_CR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) ATMEL_US_FIFOEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) ATMEL_US_RXFCLR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) ATMEL_US_TXFLCLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (atmel_use_dma_tx(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) txrdym = ATMEL_US_FOUR_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (atmel_port->rts_high &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) atmel_port->rts_low)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) fmr |= ATMEL_US_FRTSC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) ATMEL_US_RXFTHRES(atmel_port->rts_high) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) ATMEL_US_RXFTHRES2(atmel_port->rts_low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) atmel_uart_writel(port, ATMEL_US_FMR, fmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) /* Save current CSR for later comparison when handling modem-status changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * Finally, enable the serial port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) /* enable xmit & rcvr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) atmel_port->tx_stopped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (atmel_use_pdc_rx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) /* set UART timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (!atmel_port->has_hw_timer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) mod_timer(&atmel_port->uart_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) jiffies + uart_poll_timeout(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) /* set USART timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) atmel_uart_writel(port, atmel_port->rtor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) PDC_RX_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) atmel_uart_writel(port, ATMEL_US_IER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) /* enable PDC controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) } else if (atmel_use_dma_rx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) /* set UART timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (!atmel_port->has_hw_timer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) mod_timer(&atmel_port->uart_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) jiffies + uart_poll_timeout(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) /* set USART timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) atmel_uart_writel(port, atmel_port->rtor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) PDC_RX_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) atmel_uart_writel(port, ATMEL_US_IER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) ATMEL_US_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) /* enable receive only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * Flush any TX data submitted for DMA. Called when the TX circular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * buffer is reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) static void atmel_flush_buffer(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (atmel_use_pdc_tx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) atmel_port->pdc_tx.ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) * in uart_flush_buffer(), the xmit circular buffer has just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) * been cleared, so we have to reset tx_len accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) atmel_port->tx_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * Disable the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) static void atmel_shutdown(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) /* Disable modem control lines interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) atmel_disable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) /* Disable interrupts at device level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) atmel_uart_writel(port, ATMEL_US_IDR, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /* Prevent spurious interrupts from scheduling the tasklet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) atomic_inc(&atmel_port->tasklet_shutdown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) * Stop the UART timer so its callback cannot schedule any more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) * tasklets during cleanup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) del_timer_sync(&atmel_port->uart_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /* Make sure that no interrupt is on the fly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) synchronize_irq(port->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * Clear out any scheduled tasklets before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) * we destroy the buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) tasklet_kill(&atmel_port->tasklet_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) tasklet_kill(&atmel_port->tasklet_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * Ensure everything is stopped and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * disable port and break condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) atmel_stop_rx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) atmel_stop_tx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) * Shut-down the DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (atmel_port->release_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) atmel_port->release_rx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) if (atmel_port->release_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) atmel_port->release_tx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * Reset ring buffer pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) atmel_port->rx_ring.head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) atmel_port->rx_ring.tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * Free the interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) free_irq(port->irq, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) atmel_flush_buffer(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) * Power / Clock management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) static void atmel_serial_pm(struct uart_port *port, unsigned int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) unsigned int oldstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
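	/*
	 * The state argument follows enum uart_pm_state from serial_core:
	 * 0 is UART_PM_STATE_ON, 3 is UART_PM_STATE_OFF.
	 */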
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) * Enable the peripheral clock for this serial port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * This is called on uart_open() or a resume event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) clk_prepare_enable(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) /* re-enable interrupts if we disabled some on suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /* Back up the interrupt mask and disable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) atmel_uart_writel(port, ATMEL_US_IDR, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * Disable the peripheral clock for this serial port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) * This is called on uart_close() or a suspend event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) clk_disable_unprepare(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * Change the port parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct ktermios *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) /* save the current mode register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) /* reset the mode, clock divisor, parity, stop bits and data size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) ATMEL_US_PAR | ATMEL_US_USMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) /* byte size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) switch (termios->c_cflag & CSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) case CS5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) mode |= ATMEL_US_CHRL_5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) case CS6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) mode |= ATMEL_US_CHRL_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) case CS7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) mode |= ATMEL_US_CHRL_7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) mode |= ATMEL_US_CHRL_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) /* stop bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (termios->c_cflag & CSTOPB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) mode |= ATMEL_US_NBSTOP_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) /* parity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (termios->c_cflag & PARENB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) /* Mark or Space parity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) if (termios->c_cflag & CMSPAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) if (termios->c_cflag & PARODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) mode |= ATMEL_US_PAR_MARK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) mode |= ATMEL_US_PAR_SPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) } else if (termios->c_cflag & PARODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) mode |= ATMEL_US_PAR_ODD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) mode |= ATMEL_US_PAR_EVEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) mode |= ATMEL_US_PAR_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) spin_lock_irqsave(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) port->read_status_mask = ATMEL_US_OVRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (termios->c_iflag & INPCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) port->read_status_mask |= ATMEL_US_RXBRK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (atmel_use_pdc_rx(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) /* need to enable error interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) * Characters to ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) port->ignore_status_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (termios->c_iflag & IGNBRK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) port->ignore_status_mask |= ATMEL_US_RXBRK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * If we're ignoring parity and break indicators,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) * ignore overruns too (for real raw support).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) if (termios->c_iflag & IGNPAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) port->ignore_status_mask |= ATMEL_US_OVRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) /* TODO: Ignore all characters if CREAD is not set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) /* update the per-port timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) uart_update_timeout(port, termios->c_cflag, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) * save/disable interrupts. The tty layer will ensure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) * transmitter is empty if requested by the caller, so there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * no need to wait for it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) imr = atmel_uart_readl(port, ATMEL_US_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) atmel_uart_writel(port, ATMEL_US_IDR, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) /* disable receiver and transmitter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) atmel_port->tx_stopped = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /* mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (port->rs485.flags & SER_RS485_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) atmel_uart_writel(port, ATMEL_US_TTGR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) port->rs485.delay_rts_after_send);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) mode |= ATMEL_US_USMODE_RS485;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) } else if (port->iso7816.flags & SER_ISO7816_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) /* select mck as the clock source and output it on the SCK pin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) /* set max iterations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) mode |= ATMEL_US_MAX_ITER(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if ((port->iso7816.flags & SER_ISO7816_T_PARAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) == SER_ISO7816_T(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) mode |= ATMEL_US_USMODE_ISO7816_T0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) mode |= ATMEL_US_USMODE_ISO7816_T1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) } else if (termios->c_cflag & CRTSCTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) /* RS232 with hardware handshake (RTS/CTS) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (atmel_use_fifo(port) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * with ATMEL_US_USMODE_HWHS set, the controller will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) * be able to drive the RTS pin high/low when the RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) * FIFO is above RXFTHRES/below RXFTHRES2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) * It will also disable the transmitter when the CTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) * pin is high.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) * This mode is not activated if CTS pin is a GPIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * because in this case, the transmitter is always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * disabled (there must be an internal pull-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) * responsible for this behaviour).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) * If the RTS pin is a GPIO, the controller won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) * able to drive it according to the FIFO thresholds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) * but it will be handled by the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) mode |= ATMEL_US_USMODE_HWHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) * For platforms without FIFO, the flow control is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * handled by the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) mode |= ATMEL_US_USMODE_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) /* RS232 without hardware handshake */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) mode |= ATMEL_US_USMODE_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) * Set the baud rate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) * Fractional baud rate allows the output frequency to be set more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) * accurately. This feature is enabled only when using normal mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) * Currently, OVER is always set to 0 so we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) * baudrate = selected clock / (16 * (CD + FP / 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) * and therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) * 8 * CD + FP = selected clock / (2 * baudrate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) */
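	/*
	 * Worked example with illustrative numbers (not taken from a datasheet):
	 * for a 132 MHz selected clock and a requested 115200 baud,
	 * div = DIV_ROUND_CLOSEST(132000000, 2 * 115200) = 573, so
	 * CD = 573 >> 3 = 71 and FP = 573 & 7 = 5, giving an actual rate of
	 * 132000000 / (16 * (71 + 5/8)) ~= 115183 baud.
	 */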
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (atmel_port->has_frac_baudrate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) cd = div >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) fp = div & ATMEL_US_FP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) cd = uart_get_divisor(port, baud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) cd /= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) mode |= ATMEL_US_USCLKS_MCK_DIV8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) quot = cd | fp << ATMEL_US_FP_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) atmel_uart_writel(port, ATMEL_US_BRGR, quot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) /* set the mode, clock divisor, parity, stop bits and data size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) atmel_uart_writel(port, ATMEL_US_MR, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * when switching the mode, set the RTS line state according to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) * new mode, otherwise keep the former state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) unsigned int rts_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) /* let the hardware control the RTS line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) rts_state = ATMEL_US_RTSDIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) /* force RTS line to low level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) rts_state = ATMEL_US_RTSEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) atmel_uart_writel(port, ATMEL_US_CR, rts_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) atmel_port->tx_stopped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) /* restore interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) atmel_uart_writel(port, ATMEL_US_IER, imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) /* CTS flow-control and modem-status interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (UART_ENABLE_MS(port, termios->c_cflag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) atmel_enable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) atmel_disable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) spin_unlock_irqrestore(&port->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if (termios->c_line == N_PPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) port->flags |= UPF_HARDPPS_CD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) spin_lock_irq(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) atmel_enable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) spin_unlock_irq(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) port->flags &= ~UPF_HARDPPS_CD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (!UART_ENABLE_MS(port, termios->c_cflag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) spin_lock_irq(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) atmel_disable_ms(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) spin_unlock_irq(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) * Return string describing the specified port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) static const char *atmel_type(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) * Release the memory region(s) being used by 'port'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) static void atmel_release_port(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) struct platform_device *mpdev = to_platform_device(port->dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) int size = resource_size(mpdev->resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) release_mem_region(port->mapbase, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (port->flags & UPF_IOREMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) iounmap(port->membase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) port->membase = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * Request the memory region(s) being used by 'port'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) static int atmel_request_port(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) struct platform_device *mpdev = to_platform_device(port->dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) int size = resource_size(mpdev->resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (!request_mem_region(port->mapbase, size, "atmel_serial"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (port->flags & UPF_IOREMAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) port->membase = ioremap(port->mapbase, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (port->membase == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) release_mem_region(port->mapbase, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) * Configure/autoconfigure the port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) static void atmel_config_port(struct uart_port *port, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (flags & UART_CONFIG_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) port->type = PORT_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) atmel_request_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) * Verify the new serial_struct (for TIOCSSERIAL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (port->irq != ser->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (ser->io_type != SERIAL_IO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (port->uartclk / 16 != ser->baud_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) if (port->mapbase != (unsigned long)ser->iomem_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (port->iobase != ser->port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (ser->hub6 != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) #ifdef CONFIG_CONSOLE_POLL
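/* Polled console I/O: busy-wait on RXRDY/TXRDY, used e.g. by kgdb over this port */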
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) static int atmel_poll_get_char(struct uart_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) return atmel_uart_read_char(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) atmel_uart_write_char(port, ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) static const struct uart_ops atmel_pops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) .tx_empty = atmel_tx_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) .set_mctrl = atmel_set_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) .get_mctrl = atmel_get_mctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) .stop_tx = atmel_stop_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) .start_tx = atmel_start_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) .stop_rx = atmel_stop_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) .enable_ms = atmel_enable_ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) .break_ctl = atmel_break_ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) .startup = atmel_startup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) .shutdown = atmel_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) .flush_buffer = atmel_flush_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) .set_termios = atmel_set_termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) .set_ldisc = atmel_set_ldisc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) .type = atmel_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) .release_port = atmel_release_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) .request_port = atmel_request_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) .config_port = atmel_config_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) .verify_port = atmel_verify_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) .pm = atmel_serial_pm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) #ifdef CONFIG_CONSOLE_POLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) .poll_get_char = atmel_poll_get_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) .poll_put_char = atmel_poll_put_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) * Configure the port from the platform device resource info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) static int atmel_init_port(struct atmel_uart_port *atmel_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) struct uart_port *port = &atmel_port->uart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) atmel_init_property(atmel_port, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) atmel_set_ops(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) port->iotype = UPIO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) port->ops = &atmel_pops;
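/* provisional value; raised to PDC_BUFFER_SIZE below when PDC TX is used */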
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) port->fifosize = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) port->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) port->mapbase = mpdev->resource[0].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) port->irq = mpdev->resource[1].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) port->rs485_config = atmel_config_rs485;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) port->iso7816_config = atmel_config_iso7816;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) port->membase = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) ret = uart_get_rs485_mode(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) /* for the console, the clock could already be configured */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (!atmel_port->clk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) atmel_port->clk = clk_get(&mpdev->dev, "usart");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) if (IS_ERR(atmel_port->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) ret = PTR_ERR(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) atmel_port->clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) ret = clk_prepare_enable(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) clk_put(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) atmel_port->clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) port->uartclk = clk_get_rate(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) clk_disable_unprepare(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) /* only enable clock when USART is in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) * Use TXEMPTY for the TX-done interrupt in RS485 or ISO7816 mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) * otherwise TXRDY, or ENDTX|TXBUFE when PDC transmission is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) if (atmel_uart_is_half_duplex(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) else if (atmel_use_pdc_tx(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) port->fifosize = PDC_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) atmel_port->tx_done_mask = ATMEL_US_TXRDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static void atmel_console_putchar(struct uart_port *port, int ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) atmel_uart_write_char(port, ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) * Interrupts are disabled on entering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) static void atmel_console_write(struct console *co, const char *s, u_int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) struct uart_port *port = &atmel_ports[co->index].uart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) unsigned int status, imr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) unsigned int pdc_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) * First, save IMR and then disable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) imr = atmel_uart_readl(port, ATMEL_US_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) atmel_uart_writel(port, ATMEL_US_IDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) ATMEL_US_RXRDY | atmel_port->tx_done_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) /* Store PDC transmit status and disable it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) /* Make sure that the TX path is actually able to send characters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) atmel_port->tx_stopped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) uart_console_write(port, s, count, atmel_console_putchar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) * Finally, wait for the transmitter to be ready to accept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) * new data again, then restore IMR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) status = atmel_uart_readl(port, ATMEL_US_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) } while (!(status & ATMEL_US_TXRDY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) /* Restore PDC transmit status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) if (pdc_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) /* set interrupts back the way they were */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) atmel_uart_writel(port, ATMEL_US_IER, imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) * If the port was already initialised (e.g. by a boot loader),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) * try to determine the current setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) static void __init atmel_console_get_options(struct uart_port *port, int *baud,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) int *parity, int *bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) unsigned int mr, quot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) * If the baud rate generator isn't running, the port wasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) * initialized by the boot loader.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (!quot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) if (mr == ATMEL_US_CHRL_8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) *bits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) *bits = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (mr == ATMEL_US_PAR_EVEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) *parity = 'e';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) else if (mr == ATMEL_US_PAR_ODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) *parity = 'o';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) * The serial core only rounds down when matching this to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) * supported baud rate. Make sure we don't end up slightly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) * lower than one of those, as it would make us fall through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) * to a much lower baud rate than we really want.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) */
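/* i.e. baud = uartclk / (16 * CD); using (CD - 1) biases the estimate slightly high */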
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) *baud = port->uartclk / (16 * (quot - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) static int __init atmel_console_setup(struct console *co, char *options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) struct uart_port *port = &atmel_ports[co->index].uart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) int baud = 115200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) int bits = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) int parity = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) int flow = 'n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) if (port->membase == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) /* Port not initialized yet - delay setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) ret = clk_prepare_enable(atmel_ports[co->index].clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
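/* Mask all interrupts while the console port is being (re)initialised */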
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) atmel_uart_writel(port, ATMEL_US_IDR, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) atmel_port->tx_stopped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) if (options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) uart_parse_options(options, &baud, &parity, &bits, &flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) atmel_console_get_options(port, &baud, &parity, &bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) return uart_set_options(port, co, baud, parity, bits, flow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) static struct uart_driver atmel_uart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) static struct console atmel_console = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) .name = ATMEL_DEVICENAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) .write = atmel_console_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) .device = uart_console_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) .setup = atmel_console_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) .flags = CON_PRINTBUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) .index = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) .data = &atmel_uart,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) #define ATMEL_CONSOLE_DEVICE (&atmel_console)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) #define ATMEL_CONSOLE_DEVICE NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) static struct uart_driver atmel_uart = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) .driver_name = "atmel_serial",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) .dev_name = ATMEL_DEVICENAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) .major = SERIAL_ATMEL_MAJOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) .minor = MINOR_START,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) .nr = ATMEL_MAX_UART,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) .cons = ATMEL_CONSOLE_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) #ifdef CONFIG_PM
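/*
 * Will the USART peripheral clock be stopped across suspend?  On AT91 this
 * is the case when the system is about to enter slow-clock mode.
 */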
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) static bool atmel_serial_clk_will_stop(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) #ifdef CONFIG_ARCH_AT91
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) return at91_suspend_entering_slow_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) static int atmel_serial_suspend(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) struct uart_port *port = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) if (uart_console(port) && console_suspend_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) /* Drain the TX shifter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) ATMEL_US_TXEMPTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) if (uart_console(port) && !console_suspend_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) /* Cache register values as we won't get a full shutdown/startup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) * cycle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) atmel_port->cache.rtor = atmel_uart_readl(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) atmel_port->rtor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) /* we cannot wake up if we're running on the slow clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) if (atmel_serial_clk_will_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) spin_lock_irqsave(&atmel_port->lock_suspended, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) atmel_port->suspended = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) device_set_wakeup_enable(&pdev->dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) uart_suspend_port(&atmel_uart, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) static int atmel_serial_resume(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) struct uart_port *port = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (uart_console(port) && !console_suspend_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) atmel_uart_writel(port, atmel_port->rtor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) atmel_port->cache.rtor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) if (atmel_port->fifo_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) atmel_uart_writel(port, ATMEL_US_FMR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) atmel_port->cache.fmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) atmel_uart_writel(port, ATMEL_US_FIER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) atmel_port->cache.fimr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) atmel_start_rx(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
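/* Replay any interrupt the handler stashed while the port was suspended */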
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) spin_lock_irqsave(&atmel_port->lock_suspended, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) if (atmel_port->pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) atmel_handle_receive(port, atmel_port->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) atmel_handle_status(port, atmel_port->pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) atmel_port->pending_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) atmel_handle_transmit(port, atmel_port->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) atmel_port->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) atmel_port->suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) uart_resume_port(&atmel_uart, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) #define atmel_serial_suspend NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) #define atmel_serial_resume NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) atmel_port->fifo_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) atmel_port->rts_low = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) atmel_port->rts_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
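/*
 * Illustrative (hypothetical) device-tree fragment enabling the FIFOs;
 * only the "atmel,fifo-size" property is read here, the node name and
 * address below are made up:
 *
 *	uart1: serial@f8020000 {
 *		atmel,fifo-size = <32>;
 *	};
 */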
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) if (of_property_read_u32(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) "atmel,fifo-size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) &atmel_port->fifo_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) if (!atmel_port->fifo_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) atmel_port->fifo_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) dev_err(&pdev->dev, "Invalid FIFO size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) * 0 <= rts_low <= rts_high <= fifo_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) * Once their CTS line is asserted by the remote peer, some x86 UARTs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) * tend to flush their internal TX FIFO, commonly up to 16 characters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) * before they actually stop sending new data. So we try to set the RTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) * High Threshold to a reasonably high value that respects this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) * empirical 16-character rule whenever possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
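/*
 * For example, with a (hypothetical) 32-entry FIFO this yields
 * rts_high = max(16, 32 - 16) = 16 and rts_low = max(8, 32 - 20) = 12.
 */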
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) dev_info(&pdev->dev, "Using FIFO (%u data)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) atmel_port->fifo_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) atmel_port->rts_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) atmel_port->rts_low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) static int atmel_serial_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) struct atmel_uart_port *atmel_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) struct device_node *np = pdev->dev.parent->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) bool rs485_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
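/* The RX ring buffer indexing assumes its size is a power of two */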
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) * In device tree there is no node with "atmel,at91rm9200-usart-serial"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) * as compatible string. This driver is probed by the at91-usart MFD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) * driver, which is just a wrapper over the atmel_serial and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) * spi-at91-usart drivers. All attributes needed by this driver are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) * found in the of_node of the parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) pdev->dev.of_node = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) ret = of_alias_get_id(np, "serial");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) /* port id not found in platform data or device-tree aliases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) * auto-enumerate it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (ret >= ATMEL_MAX_UART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) if (test_and_set_bit(ret, atmel_ports_in_use)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) /* port already in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) atmel_port = &atmel_ports[ret];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) atmel_port->backup_imr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) atmel_port->uart.line = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) atmel_serial_probe_fifos(atmel_port, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) atomic_set(&atmel_port->tasklet_shutdown, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) spin_lock_init(&atmel_port->lock_suspended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) ret = atmel_init_port(atmel_port, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) goto err_clear_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (IS_ERR(atmel_port->gpios)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) ret = PTR_ERR(atmel_port->gpios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) goto err_clear_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) if (!atmel_use_pdc_rx(&atmel_port->uart)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) data = kmalloc_array(ATMEL_SERIAL_RINGSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) sizeof(struct atmel_uart_char),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) goto err_alloc_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) atmel_port->rx_ring.buf = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) goto err_add_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) if (uart_console(&atmel_port->uart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) * The serial core enabled the clock for us, so undo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) * the clk_prepare_enable() in atmel_console_setup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) clk_disable_unprepare(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) device_init_wakeup(&pdev->dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) platform_set_drvdata(pdev, atmel_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) * The peripheral clock has been disabled by atmel_init_port():
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) * enable it before accessing I/O registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) clk_prepare_enable(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (rs485_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) ATMEL_US_USMODE_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) ATMEL_US_RTSEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) * Get the IP name of the port (USART or UART)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) atmel_get_ip_name(&atmel_port->uart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) * The peripheral clock can now safely be disabled until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) * port is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) clk_disable_unprepare(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) err_add_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) kfree(atmel_port->rx_ring.buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) atmel_port->rx_ring.buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) err_alloc_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (!uart_console(&atmel_port->uart)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) clk_put(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) atmel_port->clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) err_clear_bit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) clear_bit(atmel_port->uart.line, atmel_ports_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) * Even if the driver is not modular, it makes sense to be able to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) * unbind a device: there can be many bound devices, and there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) * situations where dynamic binding and unbinding can be useful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) * For example, a connected device can require a specific firmware update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) * protocol that needs bitbanging on IO lines, but use the regular serial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) * port in the normal case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) static int atmel_serial_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) struct uart_port *port = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
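/* Stop the RX/TX tasklets before tearing the port down */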
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) tasklet_kill(&atmel_port->tasklet_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) tasklet_kill(&atmel_port->tasklet_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) device_init_wakeup(&pdev->dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) ret = uart_remove_one_port(&atmel_uart, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) kfree(atmel_port->rx_ring.buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) /* "port" is allocated statically, so we shouldn't free it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) clear_bit(port->line, atmel_ports_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) clk_put(atmel_port->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) atmel_port->clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) pdev->dev.of_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) static struct platform_driver atmel_serial_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) .probe = atmel_serial_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) .remove = atmel_serial_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) .suspend = atmel_serial_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) .resume = atmel_serial_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) .name = "atmel_usart_serial",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) .of_match_table = of_match_ptr(atmel_serial_dt_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) static int __init atmel_serial_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) ret = uart_register_driver(&atmel_uart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) ret = platform_driver_register(&atmel_serial_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) uart_unregister_driver(&atmel_uart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) device_initcall(atmel_serial_init);