// SPDX-License-Identifier: GPL-1.0+
/*
 * Device driver for Microgate SyncLink GT serial adapters.
 *
 * written by Paul Fulghum for Microgate Corporation
 * paulkf@microgate.com
 *
 * Microgate and SyncLink are trademarks of Microgate Corporation
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DEBUG OUTPUT DEFINITIONS
 *
 * uncomment lines below to enable specific types of debug output
 *
 * DBGINFO   information - most verbose output
 * DBGERR    serious errors
 * DBGBH     bottom half service routine debugging
 * DBGISR    interrupt service routine debugging
 * DBGDATA   output receive and transmit data
 * DBGTBUF   output transmit DMA buffers and registers
 * DBGRBUF   output receive DMA buffers and registers
 */

#define DBGINFO(fmt) if (debug_level >= DEBUG_LEVEL_INFO) printk fmt
#define DBGERR(fmt) if (debug_level >= DEBUG_LEVEL_ERROR) printk fmt
#define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt
#define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt
#define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label))
/*#define DBGTBUF(info) dump_tbufs(info)*/
/*#define DBGRBUF(info) dump_rbufs(info)*/
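/*
 * Note: each DBGxxx macro expands to a level check followed by a bare
 * printk, so callers must wrap the format string and arguments in an extra
 * set of parentheses, for example:
 *
 *	DBGINFO(("%s open rc=%d\n", info->device_name, retval));
 */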


#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioctl.h>
#include <linux/termios.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
#include <linux/synclink.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/types.h>
#include <linux/uaccess.h>

#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
#define SYNCLINK_GENERIC_HDLC 0
#endif
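/*
 * SYNCLINK_GENERIC_HDLC is 1 when the generic HDLC layer is usable by this
 * driver: either the HDLC core is built in, or both the HDLC core and this
 * driver are built as modules. It guards the net_device related code below.
 */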

/*
 * module identification
 */
static char *driver_name = "SyncLink GT";
static char *slgt_driver_name = "synclink_gt";
static char *tty_dev_prefix = "ttySLG";
MODULE_LICENSE("GPL");
#define MGSL_MAGIC 0x5401
#define MAX_DEVICES 32

static const struct pci_device_id pci_table[] = {
	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{0,}, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_table);

static int init_one(struct pci_dev *dev, const struct pci_device_id *ent);
static void remove_one(struct pci_dev *dev);
static struct pci_driver pci_driver = {
	.name = "synclink_gt",
	.id_table = pci_table,
	.probe = init_one,
	.remove = remove_one,
};

static bool pci_registered;

/*
 * module configuration and status
 */
static struct slgt_info *slgt_device_list;
static int slgt_device_count;

static int ttymajor;
static int debug_level;
static int maxframe[MAX_DEVICES];

module_param(ttymajor, int, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);

MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");

/*
 * tty support and callbacks
 */
static struct tty_driver *serial_driver;

static void wait_until_sent(struct tty_struct *tty, int timeout);
static void flush_buffer(struct tty_struct *tty);
static void tx_release(struct tty_struct *tty);

/*
 * generic HDLC support
 */
#define dev_to_port(D) (dev_to_hdlc(D)->priv)


/*
 * device specific structures, macros and functions
 */

#define SLGT_MAX_PORTS 4
#define SLGT_REG_SIZE 256

/*
 * conditional wait facility
 */
struct cond_wait {
	struct cond_wait *next;
	wait_queue_head_t q;
	wait_queue_entry_t wait;
	unsigned int data;
};
static void flush_cond_wait(struct cond_wait **head);

/*
 * DMA buffer descriptor and access macros
 */
struct slgt_desc
{
	__le16 count;
	__le16 status;
	__le32 pbuf;  /* physical address of data buffer */
	__le32 next;  /* physical address of next descriptor */

	/* driver book keeping */
	char *buf;          /* virtual address of data buffer */
	unsigned int pdesc; /* physical address of this descriptor */
	dma_addr_t buf_dma_addr;
	unsigned short buf_count;
};

#define set_desc_buffer(a,b) (a).pbuf = cpu_to_le32((unsigned int)(b))
#define set_desc_next(a,b) (a).next = cpu_to_le32((unsigned int)(b))
#define set_desc_count(a,b) (a).count = cpu_to_le16((unsigned short)(b))
#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
#define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b))
#define desc_count(a) (le16_to_cpu((a).count))
#define desc_status(a) (le16_to_cpu((a).status))
#define desc_complete(a) (le16_to_cpu((a).status) & BIT15)
#define desc_eof(a) (le16_to_cpu((a).status) & BIT2)
#define desc_crc_error(a) (le16_to_cpu((a).status) & BIT1)
#define desc_abort(a) (le16_to_cpu((a).status) & BIT0)
#define desc_residue(a) ((le16_to_cpu((a).status) & 0x38) >> 3)
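/*
 * Illustrative use of the accessor macros above (not a code path lifted from
 * this driver): given a struct slgt_desc d,
 *
 *	set_desc_buffer(d, buf_dma_addr);
 *	set_desc_count(d, length);
 *	set_desc_status(d, 0);
 *
 * prepares a descriptor for the DMA engine. Descriptor fields are little
 * endian as seen by the adapter, hence the cpu_to_le16()/cpu_to_le32()
 * conversions wrapped by these macros.
 */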

struct _input_signal_events {
	int ri_up;
	int ri_down;
	int dsr_up;
	int dsr_down;
	int dcd_up;
	int dcd_down;
	int cts_up;
	int cts_down;
};
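/*
 * The _up/_down members above count rising and falling transitions of the
 * modem input signals (RI, DSR, DCD, CTS) observed by the driver.
 */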

/*
 * device instance data structure
 */
struct slgt_info {
	void *if_ptr;		/* General purpose pointer (used by SPPP) */
	struct tty_port port;

	struct slgt_info *next_device;	/* device list link */

	int magic;

	char device_name[25];
	struct pci_dev *pdev;

	int port_count;  /* count of ports on adapter */
	int adapter_num; /* adapter instance number */
	int port_num;    /* port instance number */

	/* array of pointers to port contexts on this adapter */
	struct slgt_info *port_array[SLGT_MAX_PORTS];

	int line;		/* tty line instance number */

	struct mgsl_icount icount;

	int timeout;
	int x_char;		/* xon/xoff character */
	unsigned int read_status_mask;
	unsigned int ignore_status_mask;

	wait_queue_head_t status_event_wait_q;
	wait_queue_head_t event_wait_q;
	struct timer_list tx_timer;
	struct timer_list rx_timer;

	unsigned int gpio_present;
	struct cond_wait *gpio_wait_q;

	spinlock_t lock;	/* spinlock for synchronizing with ISR */

	struct work_struct task;
	u32 pending_bh;
	bool bh_requested;
	bool bh_running;

	int isr_overflow;
	bool irq_requested;	/* true if IRQ requested */
	bool irq_occurred;	/* for diagnostics use */

	/* device configuration */

	unsigned int bus_type;
	unsigned int irq_level;
	unsigned long irq_flags;

	unsigned char __iomem * reg_addr;  /* memory mapped registers address */
	u32 phys_reg_addr;
	bool reg_addr_requested;

	MGSL_PARAMS params;	/* communications parameters */
	u32 idle_mode;
	u32 max_frame_size;	/* as set by device config */

	unsigned int rbuf_fill_level;
	unsigned int rx_pio;
	unsigned int if_mode;
	unsigned int base_clock;
	unsigned int xsync;
	unsigned int xctrl;

	/* device status */

	bool rx_enabled;
	bool rx_restart;

	bool tx_enabled;
	bool tx_active;

	unsigned char signals;	/* serial signal states */
	int init_error;		/* initialization error */

	unsigned char *tx_buf;
	int tx_count;

	char *flag_buf;
	bool drop_rts_on_tx_done;
	struct _input_signal_events input_signal_events;

	int dcd_chkcount;	/* check counts to prevent */
	int cts_chkcount;	/* too many IRQs if a signal */
	int dsr_chkcount;	/* is floating */
	int ri_chkcount;

	char *bufs;		/* virtual address of DMA buffer lists */
	dma_addr_t bufs_dma_addr; /* physical address of buffer descriptors */

	unsigned int rbuf_count;
	struct slgt_desc *rbufs;
	unsigned int rbuf_current;
	unsigned int rbuf_index;
	unsigned int rbuf_fill_index;
	unsigned short rbuf_fill_count;

	unsigned int tbuf_count;
	struct slgt_desc *tbufs;
	unsigned int tbuf_current;
	unsigned int tbuf_start;

	unsigned char *tmp_rbuf;
	unsigned int tmp_rbuf_count;

	/* SPPP/Cisco HDLC device parts */

	int netcount;
	spinlock_t netlock;
#if SYNCLINK_GENERIC_HDLC
	struct net_device *netdev;
#endif

};

static MGSL_PARAMS default_params = {
	.mode = MGSL_MODE_HDLC,
	.loopback = 0,
	.flags = HDLC_FLAG_UNDERRUN_ABORT15,
	.encoding = HDLC_ENCODING_NRZI_SPACE,
	.clock_speed = 0,
	.addr_filter = 0xff,
	.crc_type = HDLC_CRC_16_CCITT,
	.preamble_length = HDLC_PREAMBLE_LENGTH_8BITS,
	.preamble = HDLC_PREAMBLE_PATTERN_NONE,
	.data_rate = 9600,
	.data_bits = 8,
	.stop_bits = 1,
	.parity = ASYNC_PARITY_NONE
};
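/*
 * default_params provides the initial communications parameters for each
 * port; they can later be adjusted by change_params() (from termios
 * settings) or replaced through the set_params() ioctl handler declared
 * below.
 */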


#define BH_RECEIVE  1
#define BH_TRANSMIT 2
#define BH_STATUS   4
#define IO_PIN_SHUTDOWN_LIMIT 100

#define DMABUFSIZE 256
#define DESC_LIST_SIZE 4096

#define MASK_PARITY  BIT1
#define MASK_FRAMING BIT0
#define MASK_BREAK   BIT14
#define MASK_OVERRUN BIT4
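/*
 * The MASK_* bits identify receive error conditions (parity, framing, break,
 * overrun); they are combined into the read_status_mask and
 * ignore_status_mask fields of struct slgt_info to control which conditions
 * are reported to the tty layer and which are dropped.
 */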

#define GSR   0x00 /* global status */
#define JCR   0x04 /* JTAG control */
#define IODR  0x08 /* GPIO direction */
#define IOER  0x0c /* GPIO interrupt enable */
#define IOVR  0x10 /* GPIO value */
#define IOSR  0x14 /* GPIO interrupt status */
#define TDR   0x80 /* tx data */
#define RDR   0x80 /* rx data */
#define TCR   0x82 /* tx control */
#define TIR   0x84 /* tx idle */
#define TPR   0x85 /* tx preamble */
#define RCR   0x86 /* rx control */
#define VCR   0x88 /* V.24 control */
#define CCR   0x89 /* clock control */
#define BDR   0x8a /* baud divisor */
#define SCR   0x8c /* serial control */
#define SSR   0x8e /* serial status */
#define RDCSR 0x90 /* rx DMA control/status */
#define TDCSR 0x94 /* tx DMA control/status */
#define RDDAR 0x98 /* rx DMA descriptor address */
#define TDDAR 0x9c /* tx DMA descriptor address */
#define XSR   0x40 /* extended sync pattern */
#define XCR   0x44 /* extended control */

#define RXIDLE      BIT14
#define RXBREAK     BIT14
#define IRQ_TXDATA  BIT13
#define IRQ_TXIDLE  BIT12
#define IRQ_TXUNDER BIT11 /* HDLC */
#define IRQ_RXDATA  BIT10
#define IRQ_RXIDLE  BIT9  /* HDLC */
#define IRQ_RXBREAK BIT9  /* async */
#define IRQ_RXOVER  BIT8
#define IRQ_DSR     BIT7
#define IRQ_CTS     BIT6
#define IRQ_DCD     BIT5
#define IRQ_RI      BIT4
#define IRQ_ALL     0x3ff0
#define IRQ_MASTER  BIT0

#define slgt_irq_on(info, mask) \
	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) | (mask)))
#define slgt_irq_off(info, mask) \
	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) & ~(mask)))
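/*
 * slgt_irq_on()/slgt_irq_off() do a read-modify-write of the serial control
 * register (SCR), so callers are expected to serialize access, typically by
 * holding info->lock or running in the interrupt service path.
 */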

static __u8 rd_reg8(struct slgt_info *info, unsigned int addr);
static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value);
static __u16 rd_reg16(struct slgt_info *info, unsigned int addr);
static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value);
static __u32 rd_reg32(struct slgt_info *info, unsigned int addr);
static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value);

static void msc_set_vcr(struct slgt_info *info);

static int startup(struct slgt_info *info);
static int block_til_ready(struct tty_struct *tty, struct file *filp, struct slgt_info *info);
static void shutdown(struct slgt_info *info);
static void program_hw(struct slgt_info *info);
static void change_params(struct slgt_info *info);

static int adapter_test(struct slgt_info *info);

static void reset_port(struct slgt_info *info);
static void async_mode(struct slgt_info *info);
static void sync_mode(struct slgt_info *info);

static void rx_stop(struct slgt_info *info);
static void rx_start(struct slgt_info *info);
static void reset_rbufs(struct slgt_info *info);
static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
static bool rx_get_frame(struct slgt_info *info);
static bool rx_get_buf(struct slgt_info *info);

static void tx_start(struct slgt_info *info);
static void tx_stop(struct slgt_info *info);
static void tx_set_idle(struct slgt_info *info);
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);

static void get_gtsignals(struct slgt_info *info);
static void set_gtsignals(struct slgt_info *info);
static void set_rate(struct slgt_info *info, u32 data_rate);

static void bh_transmit(struct slgt_info *info);
static void isr_txeom(struct slgt_info *info, unsigned short status);

static void tx_timeout(struct timer_list *t);
static void rx_timeout(struct timer_list *t);

/*
 * ioctl handlers
 */
static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount);
static int get_params(struct slgt_info *info, MGSL_PARAMS __user *params);
static int set_params(struct slgt_info *info, MGSL_PARAMS __user *params);
static int get_txidle(struct slgt_info *info, int __user *idle_mode);
static int set_txidle(struct slgt_info *info, int idle_mode);
static int tx_enable(struct slgt_info *info, int enable);
static int tx_abort(struct slgt_info *info);
static int rx_enable(struct slgt_info *info, int enable);
static int modem_input_wait(struct slgt_info *info, int arg);
static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr);
static int get_interface(struct slgt_info *info, int __user *if_mode);
static int set_interface(struct slgt_info *info, int if_mode);
static int set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int get_xsync(struct slgt_info *info, int __user *if_mode);
static int set_xsync(struct slgt_info *info, int if_mode);
static int get_xctrl(struct slgt_info *info, int __user *if_mode);
static int set_xctrl(struct slgt_info *info, int if_mode);

/*
 * driver functions
 */
static void release_resources(struct slgt_info *info);

/*
 * DEBUG OUTPUT CODE
 */
#ifndef DBGINFO
#define DBGINFO(fmt)
#endif
#ifndef DBGERR
#define DBGERR(fmt)
#endif
#ifndef DBGBH
#define DBGBH(fmt)
#endif
#ifndef DBGISR
#define DBGISR(fmt)
#endif

#ifdef DBGDATA
static void trace_block(struct slgt_info *info, const char *data, int count, const char *label)
{
	int i;
	int linecount;
	printk("%s %s data:\n", info->device_name, label);
	while (count) {
		linecount = (count > 16) ? 16 : count;
		for (i = 0; i < linecount; i++)
			printk("%02X ", (unsigned char)data[i]);
		for (; i < 17; i++)
			printk("   ");
		for (i = 0; i < linecount; i++) {
			if (data[i] >= 040 && data[i] <= 0176)
				printk("%c", data[i]);
			else
				printk(".");
		}
		printk("\n");
		data += linecount;
		count -= linecount;
	}
}
#else
#define DBGDATA(info, buf, size, label)
#endif

#ifdef DBGTBUF
static void dump_tbufs(struct slgt_info *info)
{
	int i;
	printk("tbuf_current=%d\n", info->tbuf_current);
	for (i = 0; i < info->tbuf_count; i++) {
		printk("%d: count=%04X status=%04X\n",
		       i, le16_to_cpu(info->tbufs[i].count), le16_to_cpu(info->tbufs[i].status));
	}
}
#else
#define DBGTBUF(info)
#endif

#ifdef DBGRBUF
static void dump_rbufs(struct slgt_info *info)
{
	int i;
	printk("rbuf_current=%d\n", info->rbuf_current);
	for (i = 0; i < info->rbuf_count; i++) {
		printk("%d: count=%04X status=%04X\n",
		       i, le16_to_cpu(info->rbufs[i].count), le16_to_cpu(info->rbufs[i].status));
	}
}
#else
#define DBGRBUF(info)
#endif

static inline int sanity_check(struct slgt_info *info, char *devname, const char *name)
{
#ifdef SANITY_CHECK
	if (!info) {
		printk("null struct slgt_info for (%s) in %s\n", devname, name);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
		return 1;
	}
#else
	if (!info)
		return 1;
#endif
	return 0;
}
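/*
 * sanity_check() returns nonzero (failure) for a NULL port pointer. When the
 * driver is built with SANITY_CHECK defined it also verifies that the port
 * structure contains the expected MGSL_MAGIC value.
 */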

/*
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
 *
 * ldisc_receive_buf - pass receive data to line discipline
 */
static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->ops->receive_buf)
			ld->ops->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* tty callbacks */

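/*
 * open() - tty open callback
 *
 * Looks up the port matching tty->index, fails if the port is unavailable
 * or claimed by the generic HDLC network interface (netcount != 0),
 * initializes the hardware via startup() on the first open, then waits in
 * block_til_ready() until the open can complete.
 */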
static int open(struct tty_struct *tty, struct file *filp)
{
	struct slgt_info *info;
	int retval, line;
	unsigned long flags;

	line = tty->index;
	if (line >= slgt_device_count) {
		DBGERR(("%s: open with invalid line #%d.\n", driver_name, line));
		return -ENODEV;
	}

	info = slgt_device_list;
	while (info && info->line != line)
		info = info->next_device;
	if (sanity_check(info, tty->name, "open"))
		return -ENODEV;
	if (info->init_error) {
		DBGERR(("%s init error=%d\n", info->device_name, info->init_error));
		return -ENODEV;
	}

	tty->driver_data = info;
	info->port.tty = tty;

	DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));

	mutex_lock(&info->port.mutex);
	info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;

	spin_lock_irqsave(&info->netlock, flags);
	if (info->netcount) {
		retval = -EBUSY;
		spin_unlock_irqrestore(&info->netlock, flags);
		mutex_unlock(&info->port.mutex);
		goto cleanup;
	}
	info->port.count++;
	spin_unlock_irqrestore(&info->netlock, flags);

	if (info->port.count == 1) {
		/* 1st open on this device, init hardware */
		retval = startup(info);
		if (retval < 0) {
			mutex_unlock(&info->port.mutex);
			goto cleanup;
		}
	}
	mutex_unlock(&info->port.mutex);
	retval = block_til_ready(tty, filp, info);
	if (retval) {
		DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval));
		goto cleanup;
	}

	retval = 0;

cleanup:
	if (retval) {
		if (tty->count == 1)
			info->port.tty = NULL; /* tty layer will release tty struct */
		if (info->port.count)
			info->port.count--;
	}

	DBGINFO(("%s open rc=%d\n", info->device_name, retval));
	return retval;
}

static void close(struct tty_struct *tty, struct file *filp)
{
	struct slgt_info *info = tty->driver_data;

	if (sanity_check(info, tty->name, "close"))
		return;
	DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));

	if (tty_port_close_start(&info->port, tty, filp) == 0)
		goto cleanup;

	mutex_lock(&info->port.mutex);
	if (tty_port_initialized(&info->port))
		wait_until_sent(tty, info->timeout);
	flush_buffer(tty);
	tty_ldisc_flush(tty);

	shutdown(info);
	mutex_unlock(&info->port.mutex);

	tty_port_close_end(&info->port, tty);
	info->port.tty = NULL;
cleanup:
	DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
}

static void hangup(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "hangup"))
		return;
	DBGINFO(("%s hangup\n", info->device_name));

	flush_buffer(tty);

	mutex_lock(&info->port.mutex);
	shutdown(info);

	spin_lock_irqsave(&info->port.lock, flags);
	info->port.count = 0;
	info->port.tty = NULL;
	spin_unlock_irqrestore(&info->port.lock, flags);
	tty_port_set_active(&info->port, 0);
	mutex_unlock(&info->port.mutex);

	wake_up_interruptible(&info->port.open_wait);
}

static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	DBGINFO(("%s set_termios\n", tty->driver->name));

	change_params(info);

	/* Handle transition to B0 status */
	if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
		info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		spin_lock_irqsave(&info->lock, flags);
		set_gtsignals(info);
		spin_unlock_irqrestore(&info->lock, flags);
	}

	/* Handle transition away from B0 status */
	if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
		info->signals |= SerialSignal_DTR;
		if (!C_CRTSCTS(tty) || !tty_throttled(tty))
			info->signals |= SerialSignal_RTS;
		spin_lock_irqsave(&info->lock, flags);
		set_gtsignals(info);
		spin_unlock_irqrestore(&info->lock, flags);
	}

	/* Handle turning off CRTSCTS */
	if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
		tty->hw_stopped = 0;
		tx_release(tty);
	}
}

static void update_tx_timer(struct slgt_info *info)
{
	/*
	 * use worst case speed of 1200bps to calculate transmit timeout
	 * based on data in buffers (tbuf_bytes) and FIFO (128 bytes)
	 */
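	/*
	 * Roughly: 8 data bits per byte at 1200bps is about 6.7ms per byte,
	 * rounded up to 7ms, plus a fixed 1000ms allowance covering the
	 * 128 byte FIFO and scheduling latency.
	 */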
	if (info->params.mode == MGSL_MODE_HDLC) {
		int timeout = (tbuf_bytes(info) * 7) + 1000;
		mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
	}
}

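/*
 * write() - tty write callback
 *
 * Loads a complete buffer into the transmit DMA ring via tx_load(), after
 * first flushing any bytes accumulated by put_char(). Returns count if the
 * data was queued, 0 if it could not be queued, or a negative error code.
 */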
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) static int write(struct tty_struct *tty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) const unsigned char *buf, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (sanity_check(info, tty->name, "write"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) DBGINFO(("%s write count=%d\n", info->device_name, count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (!info->tx_buf || (count > info->max_frame_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (!count || tty->stopped || tty->hw_stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (info->tx_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /* send accumulated data from send_char() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (!tx_load(info, info->tx_buf, info->tx_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) info->tx_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (tx_load(info, buf, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ret = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) DBGINFO(("%s write rc=%d\n", info->device_name, ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static int put_char(struct tty_struct *tty, unsigned char ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (sanity_check(info, tty->name, "put_char"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (!info->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (info->tx_count < info->max_frame_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) info->tx_buf[info->tx_count++] = ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static void send_xchar(struct tty_struct *tty, char ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (sanity_check(info, tty->name, "send_xchar"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) DBGINFO(("%s send_xchar(%d)\n", info->device_name, ch));
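	/*
	 * x_char is expected to be picked up by the transmit path and
	 * sent ahead of buffered data; start the transmitter below if
	 * it is currently idle.
	 */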
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) info->x_char = ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (!info->tx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) tx_start(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static void wait_until_sent(struct tty_struct *tty, int timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned long orig_jiffies, char_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (sanity_check(info, tty->name, "wait_until_sent"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) DBGINFO(("%s wait_until_sent entry\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (!tty_port_initialized(&info->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) orig_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /* Set check interval to 1/5 of estimated time to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * send a character, and make it at least 1. The check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * interval should also be less than the timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * Note: use tight timings here to satisfy the NIST-PCTS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
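	/*
	 * info->timeout is sized elsewhere for roughly 32 character
	 * times, so timeout/(32 * 5) approximates 1/5 of one
	 * character time.
	 */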
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (info->params.data_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) char_time = info->timeout/(32 * 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (!char_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) char_time++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) char_time = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) char_time = min_t(unsigned long, char_time, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) while (info->tx_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) msleep_interruptible(jiffies_to_msecs(char_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (timeout && time_after(jiffies, orig_jiffies + timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) DBGINFO(("%s wait_until_sent exit\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static int write_room(struct tty_struct *tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (sanity_check(info, tty->name, "write_room"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return 0;
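	/*
	 * Frame oriented: report room for one full frame while the
	 * transmitter is idle, otherwise none until the current frame
	 * completes.
	 */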
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) DBGINFO(("%s write_room=%d\n", info->device_name, ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static void flush_chars(struct tty_struct *tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (sanity_check(info, tty->name, "flush_chars"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (info->tx_count <= 0 || tty->stopped ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) tty->hw_stopped || !info->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) DBGINFO(("%s flush_chars start transmit\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) info->tx_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static void flush_buffer(struct tty_struct *tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (sanity_check(info, tty->name, "flush_buffer"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) DBGINFO(("%s flush_buffer\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) info->tx_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) tty_wakeup(tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * throttle (stop) transmitter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) static void tx_hold(struct tty_struct *tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (sanity_check(info, tty->name, "tx_hold"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) DBGINFO(("%s tx_hold\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (info->tx_enabled && info->params.mode == MGSL_MODE_ASYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * release (start) transmitter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static void tx_release(struct tty_struct *tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (sanity_check(info, tty->name, "tx_release"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) DBGINFO(("%s tx_release\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) info->tx_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * Service an IOCTL request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * Arguments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * tty pointer to tty instance data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * cmd IOCTL command code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * arg command argument/context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * Return 0 if success, otherwise error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static int ioctl(struct tty_struct *tty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (sanity_check(info, tty->name, "ioctl"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (cmd != TIOCMIWAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (tty_io_error(tty))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) case MGSL_IOCWAITEVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return wait_mgsl_event(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) case TIOCMIWAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return modem_input_wait(info, (int)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) case MGSL_IOCSGPIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return set_gpio(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) case MGSL_IOCGGPIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return get_gpio(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) case MGSL_IOCWAITGPIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return wait_gpio(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) case MGSL_IOCGXSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return get_xsync(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) case MGSL_IOCSXSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return set_xsync(info, (int)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) case MGSL_IOCGXCTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return get_xctrl(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) case MGSL_IOCSXCTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return set_xctrl(info, (int)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) mutex_lock(&info->port.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) case MGSL_IOCGPARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) ret = get_params(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) case MGSL_IOCSPARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ret = set_params(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) case MGSL_IOCGTXIDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ret = get_txidle(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) case MGSL_IOCSTXIDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ret = set_txidle(info, (int)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) case MGSL_IOCTXENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ret = tx_enable(info, (int)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) case MGSL_IOCRXENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ret = rx_enable(info, (int)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) case MGSL_IOCTXABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ret = tx_abort(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) case MGSL_IOCGSTATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ret = get_stats(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) case MGSL_IOCGIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ret = get_interface(info, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) case MGSL_IOCSIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ret = set_interface(info, (int)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ret = -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) mutex_unlock(&info->port.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
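
/*
 * Example (user space sketch, not part of the driver): selecting HDLC
 * mode through the MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls serviced
 * above. Structures and command codes come from <linux/synclink.h>;
 * the device path is illustrative.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int configure_hdlc(const char *dev)
 *	{
 *		MGSL_PARAMS params;
 *		int fd = open(dev, O_RDWR | O_NONBLOCK);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)
 *			goto fail;
 *		params.mode = MGSL_MODE_HDLC;
 *		params.crc_type = HDLC_CRC_16_CCITT;
 *		if (ioctl(fd, MGSL_IOCSPARAMS, &params) < 0)
 *			goto fail;
 *		return fd;
 *	fail:
 *		close(fd);
 *		return -1;
 *	}
 */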
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
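/*
 * Return accumulated serial line event counts; this backs the
 * TIOCGICOUNT ioctl handled by the tty core.
 */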
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static int get_icount(struct tty_struct *tty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct serial_icounter_struct *icount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct mgsl_icount cnow; /* kernel counter temps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) cnow = info->icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) icount->cts = cnow.cts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) icount->dsr = cnow.dsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) icount->rng = cnow.rng;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) icount->dcd = cnow.dcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) icount->rx = cnow.rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) icount->tx = cnow.tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) icount->frame = cnow.frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) icount->overrun = cnow.overrun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) icount->parity = cnow.parity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) icount->brk = cnow.brk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) icount->buf_overrun = cnow.buf_overrun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * support for 32 bit ioctl calls on 64 bit systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) #ifdef CONFIG_COMPAT
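/*
 * MGSL_PARAMS contains unsigned long members (mode, clock_speed,
 * data_rate) whose size differs between 32 bit user space and a
 * 64 bit kernel, so MGSL_PARAMS32 mirrors the structure with fixed
 * size compat_ulong_t members and is converted field by field.
 */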
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *user_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct MGSL_PARAMS32 tmp_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) DBGINFO(("%s get_params32\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) memset(&tmp_params, 0, sizeof(tmp_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) tmp_params.mode = (compat_ulong_t)info->params.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) tmp_params.loopback = info->params.loopback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) tmp_params.flags = info->params.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) tmp_params.encoding = info->params.encoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) tmp_params.clock_speed = (compat_ulong_t)info->params.clock_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) tmp_params.addr_filter = info->params.addr_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) tmp_params.crc_type = info->params.crc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) tmp_params.preamble_length = info->params.preamble_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) tmp_params.preamble = info->params.preamble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) tmp_params.data_rate = (compat_ulong_t)info->params.data_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) tmp_params.data_bits = info->params.data_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) tmp_params.stop_bits = info->params.stop_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) tmp_params.parity = info->params.parity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (copy_to_user(user_params, &tmp_params, sizeof(struct MGSL_PARAMS32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) struct MGSL_PARAMS32 tmp_params;
	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) DBGINFO(("%s set_params32\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) info->base_clock = tmp_params.clock_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) info->params.mode = tmp_params.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) info->params.loopback = tmp_params.loopback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) info->params.flags = tmp_params.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) info->params.encoding = tmp_params.encoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) info->params.clock_speed = tmp_params.clock_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) info->params.addr_filter = tmp_params.addr_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) info->params.crc_type = tmp_params.crc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) info->params.preamble_length = tmp_params.preamble_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) info->params.preamble = tmp_params.preamble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) info->params.data_rate = tmp_params.data_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) info->params.data_bits = tmp_params.data_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) info->params.stop_bits = tmp_params.stop_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) info->params.parity = tmp_params.parity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) program_hw(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static long slgt_compat_ioctl(struct tty_struct *tty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (sanity_check(info, tty->name, "compat_ioctl"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) case MGSL_IOCSPARAMS32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) rc = set_params32(info, compat_ptr(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) case MGSL_IOCGPARAMS32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) rc = get_params32(info, compat_ptr(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) case MGSL_IOCGPARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) case MGSL_IOCSPARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) case MGSL_IOCGTXIDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) case MGSL_IOCGSTATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) case MGSL_IOCWAITEVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) case MGSL_IOCGIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) case MGSL_IOCSGPIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) case MGSL_IOCGGPIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) case MGSL_IOCWAITGPIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) case MGSL_IOCGXSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) case MGSL_IOCGXCTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) rc = ioctl(tty, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) #define slgt_compat_ioctl NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) #endif /* ifdef CONFIG_COMPAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * proc fs support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static inline void line_info(struct seq_file *m, struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) char stat_buf[30];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) seq_printf(m, "%s: IO=%08X IRQ=%d MaxFrameSize=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) info->device_name, info->phys_reg_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) info->irq_level, info->max_frame_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* output current serial signal states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) get_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
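	/*
	 * Build "|SIG|SIG..." and print from stat_buf + 1 below to drop
	 * the leading '|'; clearing stat_buf[1] keeps the output empty
	 * when no signals are active.
	 */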
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) stat_buf[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) stat_buf[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (info->signals & SerialSignal_RTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) strcat(stat_buf, "|RTS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (info->signals & SerialSignal_CTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) strcat(stat_buf, "|CTS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (info->signals & SerialSignal_DTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) strcat(stat_buf, "|DTR");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (info->signals & SerialSignal_DSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) strcat(stat_buf, "|DSR");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (info->signals & SerialSignal_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) strcat(stat_buf, "|CD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (info->signals & SerialSignal_RI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) strcat(stat_buf, "|RI");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (info->params.mode != MGSL_MODE_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) seq_printf(m, "\tHDLC txok:%d rxok:%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) info->icount.txok, info->icount.rxok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (info->icount.txunder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) seq_printf(m, " txunder:%d", info->icount.txunder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (info->icount.txabort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) seq_printf(m, " txabort:%d", info->icount.txabort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (info->icount.rxshort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) seq_printf(m, " rxshort:%d", info->icount.rxshort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (info->icount.rxlong)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) seq_printf(m, " rxlong:%d", info->icount.rxlong);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (info->icount.rxover)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) seq_printf(m, " rxover:%d", info->icount.rxover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (info->icount.rxcrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) seq_printf(m, "\tASYNC tx:%d rx:%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) info->icount.tx, info->icount.rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (info->icount.frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) seq_printf(m, " fe:%d", info->icount.frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (info->icount.parity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) seq_printf(m, " pe:%d", info->icount.parity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (info->icount.brk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) seq_printf(m, " brk:%d", info->icount.brk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (info->icount.overrun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) seq_printf(m, " oe:%d", info->icount.overrun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* Append serial signal status to end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) seq_printf(m, " %s\n", stat_buf+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) seq_printf(m, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) info->tx_active, info->bh_requested, info->bh_running,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) info->pending_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* Called to print information about devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static int synclink_gt_proc_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct slgt_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) seq_puts(m, "synclink_gt driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) info = slgt_device_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) while (info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) line_info(m, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) info = info->next_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * return count of bytes in transmit buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static int chars_in_buffer(struct tty_struct *tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (sanity_check(info, tty->name, "chars_in_buffer"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) count = tbuf_bytes(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) DBGINFO(("%s chars_in_buffer()=%d\n", info->device_name, count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * signal remote device to throttle send data (our receive data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static void throttle(struct tty_struct *tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (sanity_check(info, tty->name, "throttle"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) DBGINFO(("%s throttle\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (I_IXOFF(tty))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) send_xchar(tty, STOP_CHAR(tty));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (C_CRTSCTS(tty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) info->signals &= ~SerialSignal_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) set_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * signal remote device to stop throttling send data (our receive data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static void unthrottle(struct tty_struct *tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (sanity_check(info, tty->name, "unthrottle"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) DBGINFO(("%s unthrottle\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (I_IXOFF(tty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (info->x_char)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) info->x_char = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) send_xchar(tty, START_CHAR(tty));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (C_CRTSCTS(tty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) info->signals |= SerialSignal_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) set_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * set or clear transmit break condition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * break_state -1=set break condition, 0=clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static int set_break(struct tty_struct *tty, int break_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) unsigned short value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (sanity_check(info, tty->name, "set_break"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) DBGINFO(("%s set_break(%d)\n", info->device_name, break_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) value = rd_reg16(info, TCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (break_state == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) value |= BIT6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) value &= ~BIT6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) wr_reg16(info, TCR, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) #if SYNCLINK_GENERIC_HDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * hdlcdev_attach - called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * @dev: pointer to network device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * @encoding: serial encoding setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * @parity: FCS setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * Set encoding and frame check sequence (FCS) options.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * Return: 0 if success, otherwise error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) unsigned short parity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct slgt_info *info = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) unsigned char new_encoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) unsigned short new_crctype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* return error if TTY interface open */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (info->port.count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) DBGINFO(("%s hdlcdev_attach\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) switch (encoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) default: return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) switch (parity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) default: return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) info->params.encoding = new_encoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) info->params.crc_type = new_crctype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /* if network interface up, reprogram hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (info->netcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) program_hw(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * hdlcdev_xmit - called by generic HDLC layer to send a frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * @skb: socket buffer containing HDLC frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * @dev: pointer to network device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct slgt_info *info = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) DBGINFO(("%s hdlc_xmit\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (!skb->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /* stop sending until this frame completes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) netif_stop_queue(dev);
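	/*
	 * the queue is expected to be woken again by the driver's
	 * transmit done handling
	 */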
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /* update network statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* save start time for transmit timeout detection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) netif_trans_update(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) tx_load(info, skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /* done with socket buffer, so free it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * hdlcdev_open - called by network layer when interface enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * @dev: pointer to network device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * Claim resources and initialize hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * Return: 0 if success, otherwise error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) static int hdlcdev_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct slgt_info *info = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
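	/*
	 * hold a module reference while the network interface is open,
	 * independent of any tty opens (released in hdlcdev_close)
	 */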
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (!try_module_get(THIS_MODULE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) DBGINFO(("%s hdlcdev_open\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /* generic HDLC layer open processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) rc = hdlc_open(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /* arbitrate between network and tty opens */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) spin_lock_irqsave(&info->netlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (info->port.count != 0 || info->netcount != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) DBGINFO(("%s hdlc_open busy\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) spin_unlock_irqrestore(&info->netlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) info->netcount = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) spin_unlock_irqrestore(&info->netlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* claim resources and init adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if ((rc = startup(info)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) spin_lock_irqsave(&info->netlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) info->netcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) spin_unlock_irqrestore(&info->netlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /* assert RTS and DTR, apply hardware settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) info->signals |= SerialSignal_RTS | SerialSignal_DTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) program_hw(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) /* enable network layer transmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) netif_trans_update(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) /* inform generic HDLC layer of current DCD status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) get_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (info->signals & SerialSignal_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * hdlcdev_close - called by network layer when interface is disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * @dev: pointer to network device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * Shutdown hardware and release resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * Return: 0 if success, otherwise error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static int hdlcdev_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct slgt_info *info = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) DBGINFO(("%s hdlcdev_close\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /* shutdown adapter and release resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) shutdown(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) hdlc_close(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) spin_lock_irqsave(&info->netlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) info->netcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) spin_unlock_irqrestore(&info->netlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) module_put(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * hdlcdev_ioctl - called by network layer to process IOCTL call to network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * @dev: pointer to network device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * @ifr: pointer to network interface request structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * @cmd: IOCTL command code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * Return: 0 if success, otherwise error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) const size_t size = sizeof(sync_serial_settings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) sync_serial_settings new_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct slgt_info *info = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) /* return error if TTY interface open */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (info->port.count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (cmd != SIOCWANDEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return hdlc_ioctl(dev, ifr, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) memset(&new_line, 0, sizeof(new_line));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) switch (ifr->ifr_settings.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) case IF_GET_IFACE: /* return current sync_serial_settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (ifr->ifr_settings.size < size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) ifr->ifr_settings.size = size; /* data size wanted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
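		/*
		 * map the driver's clock source flag pairs onto generic HDLC
		 * clock types: EXT = both clocks from the clock pins,
		 * INT = both from the baud rate generator (BRG),
		 * TXINT = rx from pin / tx from BRG,
		 * TXFROMRX = tx clocked from the receive clock pin.
		 */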
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) switch (flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) default: new_line.clock_type = CLOCK_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) new_line.clock_rate = info->params.clock_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) new_line.loopback = info->params.loopback ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (copy_to_user(line, &new_line, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
		if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (copy_from_user(&new_line, line, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
		switch (new_line.clock_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) case CLOCK_DEFAULT: flags = info->params.flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) default: return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (new_line.loopback != 0 && new_line.loopback != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) info->params.flags |= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) info->params.loopback = new_line.loopback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) info->params.clock_speed = new_line.clock_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) info->params.clock_speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) /* if network interface up, reprogram hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (info->netcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) program_hw(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return hdlc_ioctl(dev, ifr, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
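/*
 * Hypothetical user space sketch (not part of this driver) of how the
 * IF_IFACE_SYNC_SERIAL branch above is normally exercised through the
 * SIOCWANDEV ioctl, e.g. by a tool such as sethdlc. It assumes the TTY
 * interface is closed and the caller has CAP_NET_ADMIN, matching the
 * checks in hdlcdev_ioctl(); the function and variable names below are
 * illustrative only. CLOCK_INT selects the on-board BRG for both the
 * rx and tx clocks, as mapped by the CLOCK_INT case above.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/sockios.h>
 *	#include <linux/hdlc/ioctl.h>
 *
 *	static int set_internal_clock(int sock, const char *ifname,
 *				      unsigned int rate)
 *	{
 *		struct ifreq ifr;
 *		sync_serial_settings line;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		memset(&line, 0, sizeof(line));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		line.clock_type = CLOCK_INT;
 *		line.clock_rate = rate;
 *		ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
 *		ifr.ifr_settings.size = sizeof(line);
 *		ifr.ifr_settings.ifs_ifsu.sync = &line;
 *		return ioctl(sock, SIOCWANDEV, &ifr);
 *	}
 */
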
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * hdlcdev_tx_timeout - called by network layer when transmit timeout is detected
 * @dev: pointer to network device structure
 * @txqueue: unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) struct slgt_info *info = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * hdlcdev_tx_done - called by device driver when transmit completes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * @info: pointer to device instance information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * Reenable network layer transmit if stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) static void hdlcdev_tx_done(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (netif_queue_stopped(info->netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) netif_wake_queue(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * hdlcdev_rx - called by device driver when frame received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * @info: pointer to device instance information
 * @buf: pointer to buffer containing frame data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * @size: count of data bytes in buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * Pass frame to network layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct sk_buff *skb = dev_alloc_skb(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) struct net_device *dev = info->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) DBGINFO(("%s hdlcdev_rx\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) DBGERR(("%s: can't alloc skb, drop packet\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) skb_put_data(skb, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) skb->protocol = hdlc_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) dev->stats.rx_bytes += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) static const struct net_device_ops hdlcdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) .ndo_open = hdlcdev_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) .ndo_stop = hdlcdev_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) .ndo_start_xmit = hdlc_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) .ndo_do_ioctl = hdlcdev_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) .ndo_tx_timeout = hdlcdev_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * hdlcdev_init - called by device driver when adding device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * @info: pointer to device instance information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * Do generic HDLC initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * Return: 0 if success, otherwise error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) static int hdlcdev_init(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) hdlc_device *hdlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) /* allocate and initialize network and HDLC layer objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) dev = alloc_hdlcdev(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /* for network layer reporting purposes only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) dev->mem_start = info->phys_reg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) dev->mem_end = info->phys_reg_addr + SLGT_REG_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) dev->irq = info->irq_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /* network layer callbacks and settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) dev->netdev_ops = &hdlcdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) dev->watchdog_timeo = 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) dev->tx_queue_len = 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /* generic HDLC layer callbacks and settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) hdlc = dev_to_hdlc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) hdlc->attach = hdlcdev_attach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) hdlc->xmit = hdlcdev_xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /* register objects with HDLC layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) rc = register_hdlc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (rc) {
		printk(KERN_WARNING "%s: unable to register hdlc device\n", __FILE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) info->netdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * hdlcdev_exit - called by device driver when removing device instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) * @info: pointer to device instance information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * Do generic HDLC cleanup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static void hdlcdev_exit(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) unregister_hdlc_device(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) free_netdev(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) info->netdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) #endif /* ifdef CONFIG_HDLC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * get async data from rx DMA buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) static void rx_async(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) struct mgsl_icount *icount = &info->icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) unsigned int start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) unsigned char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct slgt_desc *bufs = info->rbufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) int i, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) int chars = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) int stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) unsigned char ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) start = end = info->rbuf_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
	while (desc_complete(bufs[end])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) count = desc_count(bufs[end]) - info->rbuf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) p = bufs[end].buf + info->rbuf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) DBGISR(("%s rx_async count=%d\n", info->device_name, count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) DBGDATA(info, p, count, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
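		/*
		 * async receive buffers hold data/status byte pairs:
		 * even offset = data, odd offset = status
		 * (BIT0 = framing error, BIT1 = parity error)
		 */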
		for (i = 0; i < count; i += 2, p += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) ch = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) icount->rx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) status = *(p + 1) & (BIT1 + BIT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (status & BIT1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) icount->parity++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) else if (status & BIT0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) icount->frame++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /* discard char if tty control flags say so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (status & info->ignore_status_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (status & BIT1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) stat = TTY_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) else if (status & BIT0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) stat = TTY_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) tty_insert_flip_char(&info->port, ch, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) chars++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (i < count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) /* receive buffer not completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) info->rbuf_index += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) mod_timer(&info->rx_timer, jiffies + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) info->rbuf_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) free_rbufs(info, end, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (++end == info->rbuf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
		/* if entire list searched then no more data available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (end == start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (chars)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) tty_flip_buffer_push(&info->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * return next bottom half action to perform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static int bh_action(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (info->pending_bh & BH_RECEIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) info->pending_bh &= ~BH_RECEIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) rc = BH_RECEIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) } else if (info->pending_bh & BH_TRANSMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) info->pending_bh &= ~BH_TRANSMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) rc = BH_TRANSMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) } else if (info->pending_bh & BH_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) info->pending_bh &= ~BH_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) rc = BH_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /* Mark BH routine as complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) info->bh_running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) info->bh_requested = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * perform bottom half processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) static void bh_handler(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) struct slgt_info *info = container_of(work, struct slgt_info, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) info->bh_running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
	while ((action = bh_action(info))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) switch (action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) case BH_RECEIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) DBGBH(("%s bh receive\n", info->device_name));
			switch (info->params.mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) case MGSL_MODE_ASYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) rx_async(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) case MGSL_MODE_HDLC:
				while (rx_get_frame(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) case MGSL_MODE_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) case MGSL_MODE_MONOSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) case MGSL_MODE_BISYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) case MGSL_MODE_XSYNC:
				while (rx_get_buf(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) /* restart receiver if rx DMA buffers exhausted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (info->rx_restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) rx_start(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) case BH_TRANSMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) bh_transmit(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) case BH_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) DBGBH(("%s bh status\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) info->ri_chkcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) info->dsr_chkcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) info->dcd_chkcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) info->cts_chkcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) DBGBH(("%s unknown action\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) DBGBH(("%s bh_handler exit\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) static void bh_transmit(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) struct tty_struct *tty = info->port.tty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) DBGBH(("%s bh_transmit\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) tty_wakeup(tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) static void dsr_change(struct slgt_info *info, unsigned short status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (status & BIT3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) info->signals |= SerialSignal_DSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) info->input_signal_events.dsr_up++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) info->signals &= ~SerialSignal_DSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) info->input_signal_events.dsr_down++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
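	/* disable interrupt if DSR changes IO_PIN_SHUTDOWN_LIMIT times
	 * before the status bottom half resets the count (noisy/stuck pin)
	 */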
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) slgt_irq_off(info, IRQ_DSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) info->icount.dsr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) wake_up_interruptible(&info->status_event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) wake_up_interruptible(&info->event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) info->pending_bh |= BH_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) static void cts_change(struct slgt_info *info, unsigned short status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (status & BIT2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) info->signals |= SerialSignal_CTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) info->input_signal_events.cts_up++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) info->signals &= ~SerialSignal_CTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) info->input_signal_events.cts_down++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) slgt_irq_off(info, IRQ_CTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) info->icount.cts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) wake_up_interruptible(&info->status_event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) wake_up_interruptible(&info->event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) info->pending_bh |= BH_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
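	/* with CTS flow control enabled, start/stop the transmitter to track CTS */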
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (tty_port_cts_enabled(&info->port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (info->port.tty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (info->port.tty->hw_stopped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (info->signals & SerialSignal_CTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) info->port.tty->hw_stopped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) info->pending_bh |= BH_TRANSMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (!(info->signals & SerialSignal_CTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) info->port.tty->hw_stopped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) static void dcd_change(struct slgt_info *info, unsigned short status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (status & BIT1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) info->signals |= SerialSignal_DCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) info->input_signal_events.dcd_up++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) info->signals &= ~SerialSignal_DCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) info->input_signal_events.dcd_down++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) slgt_irq_off(info, IRQ_DCD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) info->icount.dcd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) #if SYNCLINK_GENERIC_HDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (info->netcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (info->signals & SerialSignal_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) netif_carrier_on(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) netif_carrier_off(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) wake_up_interruptible(&info->status_event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) wake_up_interruptible(&info->event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) info->pending_bh |= BH_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (tty_port_check_carrier(&info->port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (info->signals & SerialSignal_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) wake_up_interruptible(&info->port.open_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (info->port.tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) tty_hangup(info->port.tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) static void ri_change(struct slgt_info *info, unsigned short status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (status & BIT0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) info->signals |= SerialSignal_RI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) info->input_signal_events.ri_up++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) info->signals &= ~SerialSignal_RI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) info->input_signal_events.ri_down++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) slgt_irq_off(info, IRQ_RI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) info->icount.rng++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) wake_up_interruptible(&info->status_event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) wake_up_interruptible(&info->event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) info->pending_bh |= BH_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
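/*
 * receive data available interrupt handler for PIO mode (info->rx_pio):
 * move data (and, in async mode, status) bytes from RDR into the receive
 * buffer list instead of using DMA transfers
 */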
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) static void isr_rxdata(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) unsigned int count = info->rbuf_fill_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) unsigned int i = info->rbuf_fill_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) unsigned short reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) while (rd_reg16(info, SSR) & IRQ_RXDATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) reg = rd_reg16(info, RDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (desc_complete(info->rbufs[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) /* all buffers full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) rx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) info->rx_restart = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) info->rbufs[i].buf[count++] = (unsigned char)reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) /* async mode saves status byte to buffer for each data byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (info->params.mode == MGSL_MODE_ASYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (count == info->rbuf_fill_level || (reg & BIT10)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) /* buffer full or end of frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) set_desc_count(info->rbufs[i], count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) set_desc_status(info->rbufs[i], BIT15 | (reg >> 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) info->rbuf_fill_count = count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (++i == info->rbuf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) info->pending_bh |= BH_RECEIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) info->rbuf_fill_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) info->rbuf_fill_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) static void isr_serial(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) unsigned short status = rd_reg16(info, SSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) DBGISR(("%s isr_serial status=%04X\n", info->device_name, status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) wr_reg16(info, SSR, status); /* clear pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) info->irq_occurred = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (info->params.mode == MGSL_MODE_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (status & IRQ_TXIDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (info->tx_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) isr_txeom(info, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (info->rx_pio && (status & IRQ_RXDATA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) isr_rxdata(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) info->icount.brk++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) /* process break detection if tty control allows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (info->port.tty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (!(status & info->ignore_status_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (info->read_status_mask & MASK_BREAK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) tty_insert_flip_char(&info->port, 0, TTY_BREAK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (info->port.flags & ASYNC_SAK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) do_SAK(info->port.tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) isr_txeom(info, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (info->rx_pio && (status & IRQ_RXDATA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) isr_rxdata(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (status & IRQ_RXIDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (status & RXIDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) info->icount.rxidle++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) info->icount.exithunt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) wake_up_interruptible(&info->event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
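		/* receive overrun: restart receiver to recover */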
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (status & IRQ_RXOVER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) rx_start(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (status & IRQ_DSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) dsr_change(info, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) if (status & IRQ_CTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) cts_change(info, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (status & IRQ_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) dcd_change(info, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (status & IRQ_RI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) ri_change(info, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static void isr_rdma(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) unsigned int status = rd_reg32(info, RDCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) DBGISR(("%s isr_rdma status=%08x\n", info->device_name, status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) /* RDCSR (rx DMA control/status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * 31..07 reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * 06 save status byte to DMA buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * 05 error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * 04 eol (end of list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * 03 eob (end of buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * 02 IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) * 01 reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) * 00 enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) wr_reg32(info, RDCSR, status); /* clear pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) if (status & (BIT5 + BIT4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) info->rx_restart = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) info->pending_bh |= BH_RECEIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) static void isr_tdma(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) unsigned int status = rd_reg32(info, TDCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) DBGISR(("%s isr_tdma status=%08x\n", info->device_name, status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) /* TDCSR (tx DMA control/status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * 31..06 reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * 05 error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * 04 eol (end of list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) * 03 eob (end of buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * 02 IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) * 01 reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) * 00 enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) wr_reg32(info, TDCSR, status); /* clear pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) if (status & (BIT5 + BIT4 + BIT3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) // another transmit buffer has completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) // run bottom half to get more send data from user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) info->pending_bh |= BH_TRANSMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * return true if there are unsent tx DMA buffers, otherwise false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) * if there are unsent buffers then info->tbuf_start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) * is set to index of first unsent buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) static bool unsent_tbufs(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) unsigned int i = info->tbuf_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) bool rc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * search backwards from last loaded buffer (precedes tbuf_current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) * for first unsent buffer (desc_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) i = info->tbuf_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (!desc_count(info->tbufs[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) info->tbuf_start = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) rc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) } while (i != info->tbuf_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) static void isr_txeom(struct slgt_info *info, unsigned short status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) DBGISR(("%s txeom status=%04x\n", info->device_name, status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) tdma_reset(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (status & IRQ_TXUNDER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) unsigned short val = rd_reg16(info, TCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) wr_reg16(info, TCR, val); /* clear reset bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (info->tx_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (info->params.mode != MGSL_MODE_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (status & IRQ_TXUNDER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) info->icount.txunder++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) else if (status & IRQ_TXIDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) info->icount.txok++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
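		/* restart transmitter if more buffers were loaded while sending */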
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) if (unsent_tbufs(info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) tx_start(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) update_tx_timer(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) info->tx_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) del_timer(&info->tx_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) info->signals &= ~SerialSignal_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) info->drop_rts_on_tx_done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) set_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) #if SYNCLINK_GENERIC_HDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) if (info->netcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) hdlcdev_tx_done(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) info->pending_bh |= BH_TRANSMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) struct cond_wait *w, *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) /* wake processes waiting for specific transitions */
	for (w = info->gpio_wait_q, prev = NULL; w != NULL; w = w->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (w->data & changed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) w->data = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) wake_up_interruptible(&w->q);
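			/* unlink satisfied waiter from the wait list */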
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (prev != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) prev->next = w->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) info->gpio_wait_q = w->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) prev = w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) /* interrupt service routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) *
 * dummy	interrupt number (not used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) * dev_id device ID supplied during interrupt registration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) struct slgt_info *info = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) unsigned int gsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
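	/*
	 * GSR bits as decoded below: BIT8 << port flags a serial interrupt,
	 * BIT16/BIT17 << (2 * port) flag rx/tx DMA interrupts for that port.
	 */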
	while ((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) info->irq_occurred = true;
		for (i = 0; i < info->port_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (info->port_array[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) spin_lock(&info->port_array[i]->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (gsr & (BIT8 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) isr_serial(info->port_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (gsr & (BIT16 << (i*2)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) isr_rdma(info->port_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (gsr & (BIT17 << (i*2)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) isr_tdma(info->port_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) spin_unlock(&info->port_array[i]->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (info->gpio_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) unsigned int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) unsigned int changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) spin_lock(&info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) while ((changed = rd_reg32(info, IOSR)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) DBGISR(("%s iosr=%08x\n", info->device_name, changed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) /* read latched state of GPIO signals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) state = rd_reg32(info, IOVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) /* clear pending GPIO interrupt bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) wr_reg32(info, IOSR, changed);
			for (i = 0; i < info->port_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (info->port_array[i] != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) isr_gpio(info->port_array[i], changed, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) spin_unlock(&info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
	for (i = 0; i < info->port_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) struct slgt_info *port = info->port_array[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) if (port == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if ((port->port.count || port->netcount) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) port->pending_bh && !port->bh_running &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) !port->bh_requested) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) DBGISR(("%s bh queued\n", port->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) schedule_work(&port->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) port->bh_requested = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) static int startup(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) DBGINFO(("%s startup\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) if (tty_port_initialized(&info->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (!info->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) if (!info->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) DBGERR(("%s can't allocate tx buffer\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) info->pending_bh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) memset(&info->icount, 0, sizeof(info->icount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) /* program hardware for current parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) change_params(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) if (info->port.tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
	tty_port_set_initialized(&info->port, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) * called by close() and hangup() to shutdown hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) static void shutdown(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (!tty_port_initialized(&info->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) DBGINFO(("%s shutdown\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) /* clear status wait queue because status changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) /* can't happen after shutting down the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) wake_up_interruptible(&info->status_event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) wake_up_interruptible(&info->event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) del_timer_sync(&info->tx_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) del_timer_sync(&info->rx_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) kfree(info->tx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) info->tx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) rx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) set_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) flush_cond_wait(&info->gpio_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) if (info->port.tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) set_bit(TTY_IO_ERROR, &info->port.tty->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) tty_port_set_initialized(&info->port, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) static void program_hw(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) rx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (info->params.mode != MGSL_MODE_ASYNC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) info->netcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) sync_mode(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) async_mode(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) set_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) info->dcd_chkcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) info->cts_chkcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) info->ri_chkcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) info->dsr_chkcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) get_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (info->netcount ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) (info->port.tty && info->port.tty->termios.c_cflag & CREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) rx_start(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) * reconfigure adapter based on new parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) static void change_params(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) unsigned cflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) int bits_per_char;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (!info->port.tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) DBGINFO(("%s change_params\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) cflag = info->port.tty->termios.c_cflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) /* if B0 rate (hangup) specified then negate RTS and DTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) /* otherwise assert RTS and DTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (cflag & CBAUD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) info->signals |= SerialSignal_RTS | SerialSignal_DTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) /* byte size and parity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) switch (cflag & CSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) case CS5: info->params.data_bits = 5; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) case CS6: info->params.data_bits = 6; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) case CS7: info->params.data_bits = 7; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) case CS8: info->params.data_bits = 8; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) default: info->params.data_bits = 7; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) if (cflag & PARENB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) info->params.parity = (cflag & PARODD) ? ASYNC_PARITY_ODD : ASYNC_PARITY_EVEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) info->params.parity = ASYNC_PARITY_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) /* calculate number of jiffies to transmit a full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) * FIFO (32 bytes) at specified data rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) bits_per_char = info->params.data_bits +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) info->params.stop_bits + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) info->params.data_rate = tty_get_baud_rate(info->port.tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (info->params.data_rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) info->timeout = (32*HZ*bits_per_char) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) info->params.data_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) info->timeout += HZ/50; /* Add .02 seconds of slop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) /* process tty input control flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) info->read_status_mask = IRQ_RXOVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) if (I_INPCK(info->port.tty))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) info->read_status_mask |= MASK_BREAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (I_IGNPAR(info->port.tty))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if (I_IGNBRK(info->port.tty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) info->ignore_status_mask |= MASK_BREAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) /* If ignoring parity and break indicators, ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) * overruns too. (For real raw support).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) if (I_IGNPAR(info->port.tty))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) info->ignore_status_mask |= MASK_OVERRUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) program_hw(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) DBGINFO(("%s get_stats\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) if (!user_icount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) memset(&info->icount, 0, sizeof(info->icount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) if (copy_to_user(user_icount, &info->icount, sizeof(struct mgsl_icount)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) static int get_params(struct slgt_info *info, MGSL_PARAMS __user *user_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) DBGINFO(("%s get_params\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (copy_to_user(user_params, &info->params, sizeof(MGSL_PARAMS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) MGSL_PARAMS tmp_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) DBGINFO(("%s set_params\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) if (copy_from_user(&tmp_params, new_params, sizeof(MGSL_PARAMS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (tmp_params.mode == MGSL_MODE_BASE_CLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) info->base_clock = tmp_params.clock_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) program_hw(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) static int get_txidle(struct slgt_info *info, int __user *idle_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) DBGINFO(("%s get_txidle=%d\n", info->device_name, info->idle_mode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) if (put_user(info->idle_mode, idle_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) static int set_txidle(struct slgt_info *info, int idle_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) info->idle_mode = idle_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (info->params.mode != MGSL_MODE_ASYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) tx_set_idle(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) static int tx_enable(struct slgt_info *info, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) DBGINFO(("%s tx_enable(%d)\n", info->device_name, enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) if (!info->tx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) tx_start(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (info->tx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) * abort transmit HDLC frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) static int tx_abort(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) DBGINFO(("%s tx_abort\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) tdma_reset(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) static int rx_enable(struct slgt_info *info, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) unsigned int rbuf_fill_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) DBGINFO(("%s rx_enable(%08x)\n", info->device_name, enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) * enable[31..16] = receive DMA buffer fill level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) * 0 = noop (leave fill level unchanged)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) * fill level must be multiple of 4 and <= buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) rbuf_fill_level = ((unsigned int)enable) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) if (rbuf_fill_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) if ((rbuf_fill_level > DMABUFSIZE) || (rbuf_fill_level % 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) info->rbuf_fill_level = rbuf_fill_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) if (rbuf_fill_level < 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) info->rx_pio = 1; /* PIO mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) info->rx_pio = 0; /* DMA mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) rx_stop(info); /* restart receiver to use new fill level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) * enable[1..0] = receiver enable command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * 0 = disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) * 1 = enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) * 2 = enable or force hunt mode if already enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) enable &= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) if (!info->rx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) rx_start(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) else if (enable == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) /* force hunt mode (write 1 to RCR[3]) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) wr_reg16(info, RCR, rd_reg16(info, RCR) | BIT3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (info->rx_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) rx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) * wait for specified event to occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) int s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) int rc=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) struct mgsl_icount cprev, cnow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) int events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) int mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) struct _input_signal_events oldsigs, newsigs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) if (get_user(mask, mask_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) DBGINFO(("%s wait_mgsl_event(%d)\n", info->device_name, mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) /* return immediately if state matches requested events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) get_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) s = info->signals;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) events = mask &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) if (events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) /* save current irq counts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) cprev = info->icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) oldsigs = info->input_signal_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) /* enable hunt and idle irqs if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) unsigned short val = rd_reg16(info, SCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) if (!(val & IRQ_RXIDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) wr_reg16(info, SCR, (unsigned short)(val | IRQ_RXIDLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) add_wait_queue(&info->event_wait_q, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) for(;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) rc = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) /* get current irq counts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) cnow = info->icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) newsigs = info->input_signal_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) /* if no change, wait aborted for some reason */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) if (newsigs.dsr_up == oldsigs.dsr_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) newsigs.dsr_down == oldsigs.dsr_down &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) newsigs.dcd_up == oldsigs.dcd_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) newsigs.dcd_down == oldsigs.dcd_down &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) newsigs.cts_up == oldsigs.cts_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) newsigs.cts_down == oldsigs.cts_down &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) newsigs.ri_up == oldsigs.ri_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) newsigs.ri_down == oldsigs.ri_down &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) cnow.exithunt == cprev.exithunt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) cnow.rxidle == cprev.rxidle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) events = mask &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) if (events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) cprev = cnow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) oldsigs = newsigs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) remove_wait_queue(&info->event_wait_q, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (!waitqueue_active(&info->event_wait_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) /* disable enable exit hunt mode/idle rcvd IRQs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) wr_reg16(info, SCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) (unsigned short)(rd_reg16(info, SCR) & ~IRQ_RXIDLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) rc = put_user(events, mask_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) static int get_interface(struct slgt_info *info, int __user *if_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) DBGINFO(("%s get_interface=%x\n", info->device_name, info->if_mode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) if (put_user(info->if_mode, if_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) static int set_interface(struct slgt_info *info, int if_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) unsigned short val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) info->if_mode = if_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) msc_set_vcr(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) /* TCR (tx control) 07 1=RTS driver control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) val = rd_reg16(info, TCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) if (info->if_mode & MGSL_INTERFACE_RTS_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) val |= BIT7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) val &= ~BIT7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) wr_reg16(info, TCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) static int get_xsync(struct slgt_info *info, int __user *xsync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) DBGINFO(("%s get_xsync=%x\n", info->device_name, info->xsync));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) if (put_user(info->xsync, xsync))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) * set extended sync pattern (1 to 4 bytes) for extended sync mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) * sync pattern is contained in least significant bytes of value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) * most significant byte of sync pattern is oldest (1st sent/detected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) static int set_xsync(struct slgt_info *info, int xsync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) DBGINFO(("%s set_xsync=%x)\n", info->device_name, xsync));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) info->xsync = xsync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) wr_reg32(info, XSR, xsync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) static int get_xctrl(struct slgt_info *info, int __user *xctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) DBGINFO(("%s get_xctrl=%x\n", info->device_name, info->xctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) if (put_user(info->xctrl, xctrl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) * set extended control options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) * xctrl[31:19] reserved, must be zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) * xctrl[18:17] extended sync pattern length in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) * 00 = 1 byte in xsr[7:0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) * 01 = 2 bytes in xsr[15:0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) * 10 = 3 bytes in xsr[23:0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) * 11 = 4 bytes in xsr[31:0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) * xctrl[16] 1 = enable terminal count, 0=disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) * xctrl[15:0] receive terminal count for fixed length packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) * value is count minus one (0 = 1 byte packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) * when terminal count is reached, receiver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) * automatically returns to hunt mode and receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) * FIFO contents are flushed to DMA buffers with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) * end of frame (EOF) status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) static int set_xctrl(struct slgt_info *info, int xctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) DBGINFO(("%s set_xctrl=%x)\n", info->device_name, xctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) info->xctrl = xctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) wr_reg32(info, XCR, xctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) * set general purpose IO pin state and direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) * user_gpio fields:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) * state each bit indicates a pin state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) * smask set bit indicates pin state to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) * dir each bit indicates a pin direction (0=input, 1=output)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) * dmask set bit indicates pin direction to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) struct gpio_desc gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) __u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) if (!info->gpio_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) info->device_name, gpio.state, gpio.smask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) gpio.dir, gpio.dmask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) spin_lock_irqsave(&info->port_array[0]->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (gpio.dmask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) data = rd_reg32(info, IODR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) data |= gpio.dmask & gpio.dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) data &= ~(gpio.dmask & ~gpio.dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) wr_reg32(info, IODR, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) if (gpio.smask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) data = rd_reg32(info, IOVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) data |= gpio.smask & gpio.state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) data &= ~(gpio.smask & ~gpio.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) wr_reg32(info, IOVR, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) * get general purpose IO pin state and direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) struct gpio_desc gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) if (!info->gpio_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) gpio.state = rd_reg32(info, IOVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) gpio.smask = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) gpio.dir = rd_reg32(info, IODR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) gpio.dmask = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) if (copy_to_user(user_gpio, &gpio, sizeof(gpio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) DBGINFO(("%s get_gpio state=%08x dir=%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) info->device_name, gpio.state, gpio.dir));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) * conditional wait facility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) static void init_cond_wait(struct cond_wait *w, unsigned int data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) init_waitqueue_head(&w->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) init_waitqueue_entry(&w->wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) w->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) static void add_cond_wait(struct cond_wait **head, struct cond_wait *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) add_wait_queue(&w->q, &w->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) w->next = *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) *head = w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) struct cond_wait *w, *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) remove_wait_queue(&cw->q, &cw->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) if (w == cw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) if (prev != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) prev->next = w->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) *head = w->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) static void flush_cond_wait(struct cond_wait **head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) while (*head != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) wake_up_interruptible(&(*head)->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) *head = (*head)->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) * wait for general purpose I/O pin(s) to enter specified state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) * user_gpio fields:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) * state - bit indicates target pin state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) * smask - set bit indicates watched pin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) * The wait ends when at least one watched pin enters the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) * state. When 0 (no error) is returned, user_gpio->state is set to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) * state of all GPIO pins when the wait ends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) * Note: Each pin may be a dedicated input, dedicated output, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) * configurable input/output. The number and configuration of pins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) * varies with the specific adapter model. Only input pins (dedicated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) * or configured) can be monitored with this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) struct gpio_desc gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) struct cond_wait wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) u32 state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) if (!info->gpio_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) info->device_name, gpio.state, gpio.smask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /* ignore output pins identified by set IODR bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) init_cond_wait(&wait, gpio.smask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) spin_lock_irqsave(&info->port_array[0]->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) /* enable interrupts for watched pins */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) /* get current pin states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) state = rd_reg32(info, IOVR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (gpio.smask & ~(state ^ gpio.state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) /* already in target state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) gpio.state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) /* wait for target state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) add_cond_wait(&info->gpio_wait_q, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) rc = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) gpio.state = wait.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) spin_lock_irqsave(&info->port_array[0]->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) remove_cond_wait(&info->gpio_wait_q, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) /* disable all GPIO interrupts if no waiting processes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) if (info->gpio_wait_q == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) wr_reg32(info, IOER, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) static int modem_input_wait(struct slgt_info *info,int arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) struct mgsl_icount cprev, cnow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) /* save current irq counts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) cprev = info->icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) add_wait_queue(&info->status_event_wait_q, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) for(;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) rc = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) /* get new irq counts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) cnow = info->icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) /* if no change, wait aborted for some reason */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) /* check for change in caller specified modem input */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) cprev = cnow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) remove_wait_queue(&info->status_event_wait_q, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) * return state of serial control and status signals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) static int tiocmget(struct tty_struct *tty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) unsigned int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) get_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) ((info->signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) ((info->signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) ((info->signals & SerialSignal_RI) ? TIOCM_RNG:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) ((info->signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) ((info->signals & SerialSignal_CTS) ? TIOCM_CTS:0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) DBGINFO(("%s tiocmget value=%08X\n", info->device_name, result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) * set modem control signals (DTR/RTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) * cmd signal command: TIOCMBIS = set bit TIOCMBIC = clear bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) * TIOCMSET = set/clear signal values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) * value bit mask for command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) static int tiocmset(struct tty_struct *tty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) unsigned int set, unsigned int clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) struct slgt_info *info = tty->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) DBGINFO(("%s tiocmset(%x,%x)\n", info->device_name, set, clear));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) if (set & TIOCM_RTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) info->signals |= SerialSignal_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (set & TIOCM_DTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) info->signals |= SerialSignal_DTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) if (clear & TIOCM_RTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) info->signals &= ~SerialSignal_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) if (clear & TIOCM_DTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) info->signals &= ~SerialSignal_DTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) set_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) static int carrier_raised(struct tty_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) struct slgt_info *info = container_of(port, struct slgt_info, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) get_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) return (info->signals & SerialSignal_DCD) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) static void dtr_rts(struct tty_port *port, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) struct slgt_info *info = container_of(port, struct slgt_info, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) spin_lock_irqsave(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) info->signals |= SerialSignal_RTS | SerialSignal_DTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) set_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) spin_unlock_irqrestore(&info->lock,flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) * block current process until the device is ready to open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) static int block_til_ready(struct tty_struct *tty, struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) bool do_clocal = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) int cd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) struct tty_port *port = &info->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) DBGINFO(("%s block_til_ready\n", tty->driver->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) /* nonblock mode is set or port is not enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) tty_port_set_active(port, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) if (C_CLOCAL(tty))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) do_clocal = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) /* Wait for carrier detect and the line to become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	 * free (i.e., not already in use). While we are in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) * this loop, port->count is dropped by one, so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) * close() knows when to free things. We restore it upon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) * exit, either normal or abnormal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) add_wait_queue(&port->open_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) port->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) port->blocked_open++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) if (C_BAUD(tty) && tty_port_initialized(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) tty_port_raise_dtr_rts(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) retval = (port->flags & ASYNC_HUP_NOTIFY) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) -EAGAIN : -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) cd = tty_port_carrier_raised(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) if (do_clocal || cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) retval = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) tty_unlock(tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) tty_lock(tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) remove_wait_queue(&port->open_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) if (!tty_hung_up_p(filp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) port->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) port->blocked_open--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) if (!retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) tty_port_set_active(port, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) * allocate buffers used for calling line discipline receive_buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) * directly in synchronous mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) * note: add 5 bytes to max frame size to allow appending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) * 32-bit CRC and status byte when configured to do so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) static int alloc_tmp_rbuf(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) if (info->tmp_rbuf == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) /* unused flag buffer to satisfy receive_buf calling interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) info->flag_buf = kzalloc(info->max_frame_size + 5, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) if (!info->flag_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) kfree(info->tmp_rbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) info->tmp_rbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) static void free_tmp_rbuf(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) kfree(info->tmp_rbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) info->tmp_rbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) kfree(info->flag_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) info->flag_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) * allocate DMA descriptor lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) static int alloc_desc(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) unsigned int pbufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) /* allocate memory to hold descriptor lists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) info->bufs = dma_alloc_coherent(&info->pdev->dev, DESC_LIST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) &info->bufs_dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) if (info->bufs == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) info->rbufs = (struct slgt_desc*)info->bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) pbufs = (unsigned int)info->bufs_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) * Build circular lists of descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) for (i=0; i < info->rbuf_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) /* physical address of this descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) info->rbufs[i].pdesc = pbufs + (i * sizeof(struct slgt_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) /* physical address of next descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) if (i == info->rbuf_count - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) info->rbufs[i].next = cpu_to_le32(pbufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) info->rbufs[i].next = cpu_to_le32(pbufs + ((i+1) * sizeof(struct slgt_desc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) set_desc_count(info->rbufs[i], DMABUFSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) for (i=0; i < info->tbuf_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) /* physical address of this descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) info->tbufs[i].pdesc = pbufs + ((info->rbuf_count + i) * sizeof(struct slgt_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) /* physical address of next descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) if (i == info->tbuf_count - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) info->tbufs[i].next = cpu_to_le32(pbufs + info->rbuf_count * sizeof(struct slgt_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) info->tbufs[i].next = cpu_to_le32(pbufs + ((info->rbuf_count + i + 1) * sizeof(struct slgt_desc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) static void free_desc(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) if (info->bufs != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) dma_free_coherent(&info->pdev->dev, DESC_LIST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) info->bufs, info->bufs_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) info->bufs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) info->rbufs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) info->tbufs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)
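/*
 * allocate a DMABUFSIZE coherent data buffer for each descriptor
 */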
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) for (i=0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) bufs[i].buf = dma_alloc_coherent(&info->pdev->dev, DMABUFSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) &bufs[i].buf_dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) if (!bufs[i].buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) bufs[i].pbuf = cpu_to_le32((unsigned int)bufs[i].buf_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) for (i=0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) if (bufs[i].buf == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) dma_free_coherent(&info->pdev->dev, DMABUFSIZE, bufs[i].buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) bufs[i].buf_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) bufs[i].buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
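/*
 * allocate all DMA resources: descriptor lists, data buffers
 * and temporary receive buffer
 */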
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) static int alloc_dma_bufs(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) info->rbuf_count = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) info->tbuf_count = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) if (alloc_desc(info) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) alloc_bufs(info, info->rbufs, info->rbuf_count) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) alloc_bufs(info, info->tbufs, info->tbuf_count) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) alloc_tmp_rbuf(info) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) DBGERR(("%s DMA buffer alloc fail\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) reset_rbufs(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) static void free_dma_bufs(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) if (info->bufs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) free_bufs(info, info->rbufs, info->rbuf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) free_bufs(info, info->tbufs, info->tbuf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) free_desc(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) free_tmp_rbuf(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
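/*
 * reserve and map the adapter register space
 */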
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) static int claim_resources(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) if (request_mem_region(info->phys_reg_addr, SLGT_REG_SIZE, "synclink_gt") == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) DBGERR(("%s reg addr conflict, addr=%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) info->device_name, info->phys_reg_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) info->init_error = DiagStatus_AddressConflict;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) info->reg_addr_requested = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) if (!info->reg_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) DBGERR(("%s can't map device registers, addr=%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) info->device_name, info->phys_reg_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) info->init_error = DiagStatus_CantAssignPciResources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) release_resources(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412)
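/*
 * release IRQ, register memory region and register mapping
 * held by the adapter
 */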
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) static void release_resources(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) if (info->irq_requested) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) free_irq(info->irq_level, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) info->irq_requested = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) if (info->reg_addr_requested) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) info->reg_addr_requested = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) if (info->reg_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) iounmap(info->reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) info->reg_addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) /* Add the specified device instance data structure to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) * global linked list of devices and increment the device count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) static void add_device(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) char *devstr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) info->next_device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) info->line = slgt_device_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) sprintf(info->device_name, "%s%d", tty_dev_prefix, info->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) if (info->line < MAX_DEVICES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) if (maxframe[info->line])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) info->max_frame_size = maxframe[info->line];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) slgt_device_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) if (!slgt_device_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) slgt_device_list = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) struct slgt_info *current_dev = slgt_device_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) while(current_dev->next_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) current_dev = current_dev->next_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) current_dev->next_device = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) if (info->max_frame_size < 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) info->max_frame_size = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) else if (info->max_frame_size > 65535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) info->max_frame_size = 65535;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) switch(info->pdev->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) case SYNCLINK_GT_DEVICE_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) devstr = "GT";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) case SYNCLINK_GT2_DEVICE_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) devstr = "GT2";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) case SYNCLINK_GT4_DEVICE_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) devstr = "GT4";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) case SYNCLINK_AC_DEVICE_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) devstr = "AC";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) info->params.mode = MGSL_MODE_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) devstr = "(unknown model)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	printk(KERN_INFO "SyncLink %s %s IO=%08x IRQ=%d MaxFrameSize=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) devstr, info->device_name, info->phys_reg_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) info->irq_level, info->max_frame_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) #if SYNCLINK_GENERIC_HDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) hdlcdev_init(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) static const struct tty_port_operations slgt_port_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) .carrier_raised = carrier_raised,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) .dtr_rts = dtr_rts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) * allocate device instance structure, return NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) struct slgt_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) if (!info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) DBGERR(("%s device alloc failed adapter=%d port=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) driver_name, adapter_num, port_num));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) tty_port_init(&info->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) info->port.ops = &slgt_port_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) info->magic = MGSL_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) INIT_WORK(&info->task, bh_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) info->max_frame_size = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) info->base_clock = 14745600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) info->rbuf_fill_level = DMABUFSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) info->port.close_delay = 5*HZ/10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) info->port.closing_wait = 30*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) init_waitqueue_head(&info->status_event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) init_waitqueue_head(&info->event_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) spin_lock_init(&info->netlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) info->idle_mode = HDLC_TXIDLE_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) info->adapter_num = adapter_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) info->port_num = port_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) timer_setup(&info->tx_timer, tx_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) timer_setup(&info->rx_timer, rx_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) /* Copy configuration info to device instance data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) info->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) info->irq_level = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) info->phys_reg_addr = pci_resource_start(pdev,0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) info->bus_type = MGSL_BUS_TYPE_PCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) info->irq_flags = IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) info->init_error = -1; /* assume error, set to 0 on successful init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) return info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
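/*
 * create device instances for all ports of an adapter, claim
 * adapter resources and register the tty devices
 */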
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) static void device_init(int adapter_num, struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) struct slgt_info *port_array[SLGT_MAX_PORTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) int port_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) if (pdev->device == SYNCLINK_GT2_DEVICE_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) port_count = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) else if (pdev->device == SYNCLINK_GT4_DEVICE_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) port_count = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) /* allocate device instances for all ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) for (i=0; i < port_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) port_array[i] = alloc_dev(adapter_num, i, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) if (port_array[i] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) for (--i; i >= 0; --i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) tty_port_destroy(&port_array[i]->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) kfree(port_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) /* give copy of port_array to all ports and add to device list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) for (i=0; i < port_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) memcpy(port_array[i]->port_array, port_array, sizeof(port_array));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) add_device(port_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) port_array[i]->port_count = port_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) spin_lock_init(&port_array[i]->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) /* Allocate and claim adapter resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) if (!claim_resources(port_array[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) alloc_dma_bufs(port_array[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) /* copy resource information from first port to others */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) for (i = 1; i < port_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) port_array[i]->irq_level = port_array[0]->irq_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) port_array[i]->reg_addr = port_array[0]->reg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) alloc_dma_bufs(port_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) if (request_irq(port_array[0]->irq_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) slgt_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) port_array[0]->irq_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) port_array[0]->device_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) port_array[0]) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) DBGERR(("%s request_irq failed IRQ=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) port_array[0]->device_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) port_array[0]->irq_level));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) port_array[0]->irq_requested = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) adapter_test(port_array[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) for (i=1 ; i < port_count ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) port_array[i]->init_error = port_array[0]->init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) port_array[i]->gpio_present = port_array[0]->gpio_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) for (i = 0; i < port_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) struct slgt_info *info = port_array[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) tty_port_register_device(&info->port, serial_driver, info->line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) &info->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608)
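/*
 * PCI probe callback: enable the device and set up its ports
 */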
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) static int init_one(struct pci_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) if (pci_enable_device(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 		printk(KERN_ERR "error enabling pci device %p\n", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) pci_set_master(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) device_init(slgt_device_count, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
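/*
 * PCI remove callback: empty, cleanup is performed by
 * slgt_cleanup() at module unload
 */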
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) static void remove_one(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) static const struct tty_operations ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) .open = open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) .close = close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) .write = write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) .put_char = put_char,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) .flush_chars = flush_chars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) .write_room = write_room,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) .chars_in_buffer = chars_in_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) .flush_buffer = flush_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) .ioctl = ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) .compat_ioctl = slgt_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) .throttle = throttle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) .unthrottle = unthrottle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) .send_xchar = send_xchar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) .break_ctl = set_break,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) .wait_until_sent = wait_until_sent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) .set_termios = set_termios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) .stop = tx_hold,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) .start = tx_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) .hangup = hangup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) .tiocmget = tiocmget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) .tiocmset = tiocmset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) .get_icount = get_icount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) .proc_show = synclink_gt_proc_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
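/*
 * unregister tty devices and driver, reset and release all
 * device instances, unregister the PCI driver
 */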
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) static void slgt_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) struct slgt_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) struct slgt_info *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) printk(KERN_INFO "unload %s\n", driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) if (serial_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) for (info=slgt_device_list ; info != NULL ; info=info->next_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) tty_unregister_device(serial_driver, info->line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) rc = tty_unregister_driver(serial_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) DBGERR(("tty_unregister_driver error=%d\n", rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) put_tty_driver(serial_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) /* reset devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) info = slgt_device_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) while(info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) reset_port(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) info = info->next_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) /* release devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) info = slgt_device_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) while(info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) #if SYNCLINK_GENERIC_HDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) hdlcdev_exit(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) free_dma_bufs(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) free_tmp_rbuf(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) if (info->port_num == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) release_resources(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) tmp = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) info = info->next_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) tty_port_destroy(&tmp->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) if (pci_registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) pci_unregister_driver(&pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) * Driver initialization entry point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) static int __init slgt_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) printk(KERN_INFO "%s\n", driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) serial_driver = alloc_tty_driver(MAX_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) if (!serial_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 		printk(KERN_ERR "%s can't allocate tty driver\n", driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) /* Initialize the tty_driver structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) serial_driver->driver_name = slgt_driver_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) serial_driver->name = tty_dev_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) serial_driver->major = ttymajor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) serial_driver->minor_start = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) serial_driver->subtype = SERIAL_TYPE_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) serial_driver->init_termios = tty_std_termios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) serial_driver->init_termios.c_cflag =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) B9600 | CS8 | CREAD | HUPCL | CLOCAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) serial_driver->init_termios.c_ispeed = 9600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) serial_driver->init_termios.c_ospeed = 9600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) tty_set_operations(serial_driver, &ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) if ((rc = tty_register_driver(serial_driver)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) DBGERR(("%s can't register serial driver\n", driver_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) put_tty_driver(serial_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) serial_driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) printk(KERN_INFO "%s, tty major#%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) driver_name, serial_driver->major);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) slgt_device_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) if ((rc = pci_register_driver(&pci_driver)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 		printk(KERN_ERR "%s pci_register_driver error=%d\n", driver_name, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) pci_registered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) if (!slgt_device_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 		printk(KERN_INFO "%s no devices found\n", driver_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) slgt_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) static void __exit slgt_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) slgt_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) module_init(slgt_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) module_exit(slgt_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) * register access routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)
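/*
 * CALC_REGADDR computes the mapped address of a register:
 * addresses below 0x40 get no per-port offset, 0x40..0x7f
 * add port_num * 16, 0x80 and above add port_num * 32
 */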
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) #define CALC_REGADDR() \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) if (addr >= 0x80) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) reg_addr += (info->port_num) * 32; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) else if (addr >= 0x40) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) reg_addr += (info->port_num) * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) CALC_REGADDR();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) return readb((void __iomem *)reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) CALC_REGADDR();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) writeb(value, (void __iomem *)reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) static __u16 rd_reg16(struct slgt_info *info, unsigned int addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) CALC_REGADDR();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) return readw((void __iomem *)reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) CALC_REGADDR();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) writew(value, (void __iomem *)reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) static __u32 rd_reg32(struct slgt_info *info, unsigned int addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) CALC_REGADDR();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) return readl((void __iomem *)reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) CALC_REGADDR();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) writel(value, (void __iomem *)reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806)
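/*
 * reset receive DMA channel and wait for the enable bit to clear
 */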
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) static void rdma_reset(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) /* set reset bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) wr_reg32(info, RDCSR, BIT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) /* wait for enable bit cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	for (i = 0; i < 1000; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (!(rd_reg32(info, RDCSR) & BIT0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819)
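/*
 * reset transmit DMA channel and wait for the enable bit to clear
 */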
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) static void tdma_reset(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) /* set reset bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) wr_reg32(info, TDCSR, BIT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) /* wait for enable bit cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	for (i = 0; i < 1000; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) if (!(rd_reg32(info, TDCSR) & BIT0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) * enable internal loopback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) * TxCLK and RxCLK are generated from BRG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) * and TxD is looped back to RxD internally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) static void enable_loopback(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) /* SCR (serial control) BIT2=loopback enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) if (info->params.mode != MGSL_MODE_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) /* CCR (clock control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) * 07..05 tx clock source (010 = BRG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) * 04..02 rx clock source (010 = BRG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) * 01 auxclk enable (0 = disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) * 00 BRG enable (1 = enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) * 0100 1001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) wr_reg8(info, CCR, 0x49);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) /* set speed if available, otherwise use default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) if (info->params.clock_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) set_rate(info, info->params.clock_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) set_rate(info, 3686400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) * set baud rate generator to specified rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) static void set_rate(struct slgt_info *info, u32 rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) unsigned int div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) unsigned int osc = info->base_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) /* div = osc/rate - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) * Round div up if osc/rate is not integer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) * force to next slowest rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) if (rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) div = osc/rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) if (!(osc % rate) && div)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) div--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) wr_reg16(info, BDR, (unsigned short)div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883)
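/*
 * stop receiver: disable and reset receiver, mask and clear
 * rx interrupts, reset receive DMA
 */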
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) static void rx_stop(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) unsigned short val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) /* disable and reset receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) wr_reg16(info, RCR, val); /* clear reset bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA + IRQ_RXIDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) /* clear pending rx interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) wr_reg16(info, SSR, IRQ_RXIDLE + IRQ_RXOVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) rdma_reset(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) info->rx_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) info->rx_restart = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903)
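/*
 * start receiver: reset receiver and receive DMA, select PIO
 * or DMA operation, then enable the receiver
 */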
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) static void rx_start(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) unsigned short val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) /* clear pending rx overrun IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) wr_reg16(info, SSR, IRQ_RXOVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) /* reset and disable receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) wr_reg16(info, RCR, val); /* clear reset bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) rdma_reset(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) reset_rbufs(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) if (info->rx_pio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) /* rx request when rx FIFO not empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) slgt_irq_on(info, IRQ_RXDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) if (info->params.mode == MGSL_MODE_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) /* enable saving of rx status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) wr_reg32(info, RDCSR, BIT6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) /* rx request when rx FIFO half full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) /* set 1st descriptor address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) if (info->params.mode != MGSL_MODE_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) /* enable rx DMA and DMA interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) wr_reg32(info, RDCSR, (BIT2 + BIT0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) /* enable saving of rx status, rx DMA and DMA interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) slgt_irq_on(info, IRQ_RXOVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) /* enable receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) info->rx_restart = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) info->rx_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952)
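/*
 * enable transmitter and, if transmit data is buffered, start
 * transmit DMA (asserting RTS first when auto RTS is enabled)
 */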
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) static void tx_start(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) if (!info->tx_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) wr_reg16(info, TCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) (unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) info->tx_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (desc_count(info->tbufs[info->tbuf_start])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) info->drop_rts_on_tx_done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) if (info->params.mode != MGSL_MODE_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) get_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) if (!(info->signals & SerialSignal_RTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) info->signals |= SerialSignal_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) set_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) info->drop_rts_on_tx_done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) slgt_irq_off(info, IRQ_TXDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) /* clear tx idle and underrun status bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) slgt_irq_off(info, IRQ_TXDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) slgt_irq_on(info, IRQ_TXIDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) /* clear tx idle status bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) wr_reg16(info, SSR, IRQ_TXIDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) /* set 1st descriptor address and start DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) wr_reg32(info, TDCSR, BIT2 + BIT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) info->tx_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) static void tx_stop(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) unsigned short val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) del_timer(&info->tx_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) tdma_reset(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) /* reset and disable transmitter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) val = rd_reg16(info, TCR) & ~BIT1; /* clear enable bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004)
	/* clear tx idle and underrun status bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) reset_tbufs(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) info->tx_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) info->tx_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) static void reset_port(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) if (!info->reg_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) rx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) set_gtsignals(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) static void reset_adapter(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) int i;
	for (i = 0; i < info->port_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) if (info->port_array[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) reset_port(info->port_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) static void async_mode(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) unsigned short val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) rx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) /* TCR (tx control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) * 15..13 mode, 010=async
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) * 12..10 encoding, 000=NRZ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) * 09 parity enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) * 08 1=odd parity, 0=even parity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) * 07 1=RTS driver control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) * 06 1=break enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) * 05..04 character length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) * 00=5 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) * 01=6 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) * 10=7 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) * 11=8 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) * 03 0=1 stop bit, 1=2 stop bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) * 02 reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) * 01 enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) * 00 auto-CTS enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) */
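	/* 0x4000: bits 15..13 = 010 selects async mode, all other fields
	 * start cleared; e.g. 8 data bits, no parity, one stop bit (and no
	 * RTS/CTS options) gives val = 0x4000 + BIT5 + BIT4 = 0x4030
	 */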
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) val = 0x4000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) if (info->if_mode & MGSL_INTERFACE_RTS_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) val |= BIT7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) if (info->params.parity != ASYNC_PARITY_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) val |= BIT9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) if (info->params.parity == ASYNC_PARITY_ODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) val |= BIT8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) switch (info->params.data_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) case 6: val |= BIT4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) case 7: val |= BIT5; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) case 8: val |= BIT5 + BIT4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) if (info->params.stop_bits != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) val |= BIT3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) if (info->params.flags & HDLC_FLAG_AUTO_CTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) val |= BIT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) wr_reg16(info, TCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) /* RCR (rx control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) * 15..13 mode, 010=async
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) * 12..10 encoding, 000=NRZ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) * 09 parity enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) * 08 1=odd parity, 0=even parity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) * 07..06 reserved, must be 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) * 05..04 character length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) * 00=5 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) * 01=6 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) * 10=7 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) * 11=8 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) * 03 reserved, must be zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) * 02 reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) * 01 enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) * 00 auto-DCD enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) val = 0x4000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) if (info->params.parity != ASYNC_PARITY_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) val |= BIT9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) if (info->params.parity == ASYNC_PARITY_ODD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) val |= BIT8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) switch (info->params.data_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) case 6: val |= BIT4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) case 7: val |= BIT5; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) case 8: val |= BIT5 + BIT4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) if (info->params.flags & HDLC_FLAG_AUTO_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) val |= BIT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) wr_reg16(info, RCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) /* CCR (clock control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) * 07..05 011 = tx clock source is BRG/16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) * 04..02 010 = rx clock source is BRG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) * 01 0 = auxclk disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) * 00 1 = BRG enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) * 0110 1001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) wr_reg8(info, CCR, 0x69);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) msc_set_vcr(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) /* SCR (serial control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) * 15 1=tx req on FIFO half empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) * 14 1=rx req on FIFO half full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) * 13 tx data IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) * 12 tx idle IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) * 11 rx break on IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) * 10 rx data IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) * 09 rx break off IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) * 08 overrun IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) * 07 DSR IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) * 06 CTS IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) * 05 DCD IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) * 04 RI IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) * 03 0=16x sampling, 1=8x sampling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) * 02 1=txd->rxd internal loopback enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) * 01 reserved, must be zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) * 00 1=master IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) val = BIT15 + BIT14 + BIT0;
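	/* prefer 16x sampling; fall back to 8x (if the hardware supports it)
	 * when the base clock cannot supply an exact 16x clock for the
	 * requested data rate
	 */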
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) /* JCR[8] : 1 = x8 async mode feature available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) ((info->base_clock < (info->params.data_rate * 16)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) (info->base_clock % (info->params.data_rate * 16)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) /* use 8x sampling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) val |= BIT3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) set_rate(info, info->params.data_rate * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) /* use 16x sampling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) set_rate(info, info->params.data_rate * 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) wr_reg16(info, SCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) if (info->params.loopback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) enable_loopback(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) static void sync_mode(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) unsigned short val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) rx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) /* TCR (tx control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) * 15..13 mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) * 000=HDLC/SDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) * 001=raw bit synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) * 010=asynchronous/isochronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) * 011=monosync byte synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) * 100=bisync byte synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) * 101=xsync byte synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) * 12..10 encoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) * 09 CRC enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) * 08 CRC32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) * 07 1=RTS driver control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) * 06 preamble enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) * 05..04 preamble length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) * 03 share open/close flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) * 02 reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) * 01 enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) * 00 auto-CTS enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) */
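	/* start with only the reset bit (BIT2) set; tx_start() later clears
	 * BIT2 and sets the enable bit (BIT1)
	 */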
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) val = BIT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) switch(info->params.mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) case MGSL_MODE_XSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) val |= BIT15 + BIT13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) case MGSL_MODE_BISYNC: val |= BIT15; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) case MGSL_MODE_RAW: val |= BIT13; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) if (info->if_mode & MGSL_INTERFACE_RTS_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) val |= BIT7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) switch(info->params.encoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) case HDLC_ENCODING_NRZB: val |= BIT10; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) switch (info->params.crc_type & HDLC_CRC_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) case HDLC_CRC_16_CCITT: val |= BIT9; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) val |= BIT6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) switch (info->params.preamble_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT5; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT5 + BIT4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) if (info->params.flags & HDLC_FLAG_AUTO_CTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) val |= BIT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) wr_reg16(info, TCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) /* TPR (transmit preamble) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) switch (info->params.preamble)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) case HDLC_PREAMBLE_PATTERN_ZEROS: val = 0x00; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) case HDLC_PREAMBLE_PATTERN_10: val = 0x55; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) case HDLC_PREAMBLE_PATTERN_01: val = 0xaa; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) default: val = 0x7e; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) wr_reg8(info, TPR, (unsigned char)val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) /* RCR (rx control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) * 15..13 mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) * 000=HDLC/SDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) * 001=raw bit synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) * 010=asynchronous/isochronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) * 011=monosync byte synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) * 100=bisync byte synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) * 101=xsync byte synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) * 12..10 encoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) * 09 CRC enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) * 08 CRC32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) * 07..03 reserved, must be 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) * 02 reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) * 01 enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) * 00 auto-DCD enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) switch(info->params.mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) case MGSL_MODE_XSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) val |= BIT15 + BIT13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) case MGSL_MODE_BISYNC: val |= BIT15; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) case MGSL_MODE_RAW: val |= BIT13; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) switch(info->params.encoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) case HDLC_ENCODING_NRZB: val |= BIT10; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) switch (info->params.crc_type & HDLC_CRC_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) case HDLC_CRC_16_CCITT: val |= BIT9; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) if (info->params.flags & HDLC_FLAG_AUTO_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) val |= BIT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) wr_reg16(info, RCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) /* CCR (clock control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) * 07..05 tx clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) * 04..02 rx clock source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) * 01 auxclk enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) * 00 BRG enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) if (info->params.flags & HDLC_FLAG_TXC_BRG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) // when RxC source is DPLL, BRG generates 16X DPLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) // reference clock, so take TxC from BRG/16 to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) // transmit clock at actual data rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) if (info->params.flags & HDLC_FLAG_RXC_DPLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) val |= BIT6 + BIT5; /* 011, txclk = BRG/16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) val |= BIT6; /* 010, txclk = BRG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) else if (info->params.flags & HDLC_FLAG_TXC_DPLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) val |= BIT7; /* 100, txclk = DPLL Input */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) else if (info->params.flags & HDLC_FLAG_TXC_RXCPIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) val |= BIT5; /* 001, txclk = RXC Input */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) if (info->params.flags & HDLC_FLAG_RXC_BRG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) val |= BIT3; /* 010, rxclk = BRG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) else if (info->params.flags & HDLC_FLAG_RXC_DPLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) val |= BIT4; /* 100, rxclk = DPLL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) else if (info->params.flags & HDLC_FLAG_RXC_TXCPIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) val |= BIT2; /* 001, rxclk = TXC Input */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344)
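	/* a nonzero clock_speed enables the BRG (BIT0) and its auxclk output (BIT1) */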
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) if (info->params.clock_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) val |= BIT1 + BIT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) wr_reg8(info, CCR, (unsigned char)val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) if (info->params.flags & (HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) // program DPLL mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) switch(info->params.encoding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) case HDLC_ENCODING_BIPHASE_MARK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) case HDLC_ENCODING_BIPHASE_SPACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) val = BIT7; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) case HDLC_ENCODING_BIPHASE_LEVEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) case HDLC_ENCODING_DIFF_BIPHASE_LEVEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) val = BIT7 + BIT6; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) default: val = BIT6; // NRZ encodings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) // DPLL requires a 16X reference clock from BRG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) set_rate(info, info->params.clock_speed * 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) set_rate(info, info->params.clock_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) tx_set_idle(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) msc_set_vcr(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) /* SCR (serial control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) * 15 1=tx req on FIFO half empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) * 14 1=rx req on FIFO half full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) * 13 tx data IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) * 12 tx idle IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) * 11 underrun IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) * 10 rx data IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) * 09 rx idle IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) * 08 overrun IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) * 07 DSR IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) * 06 CTS IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) * 05 DCD IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) * 04 RI IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) * 03 reserved, must be zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) * 02 1=txd->rxd internal loopback enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) * 01 reserved, must be zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) * 00 1=master IRQ enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) wr_reg16(info, SCR, BIT15 + BIT14 + BIT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) if (info->params.loopback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) enable_loopback(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) * set transmit idle mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) static void tx_set_idle(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) unsigned char val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) unsigned short tcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) /* if preamble enabled (tcr[6] == 1) then tx idle size = 8 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) * else tcr[5:4] = tx idle size: 00 = 8 bits, 01 = 16 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) tcr = rd_reg16(info, TCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) if (info->idle_mode & HDLC_TXIDLE_CUSTOM_16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) /* disable preamble, set idle size to 16 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) tcr = (tcr & ~(BIT6 + BIT5)) | BIT4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) /* MSB of 16 bit idle specified in tx preamble register (TPR) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) wr_reg8(info, TPR, (unsigned char)((info->idle_mode >> 8) & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) } else if (!(tcr & BIT6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) /* preamble is disabled, set idle size to 8 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) tcr &= ~(BIT5 + BIT4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) wr_reg16(info, TCR, tcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) if (info->idle_mode & (HDLC_TXIDLE_CUSTOM_8 | HDLC_TXIDLE_CUSTOM_16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) /* LSB of custom tx idle specified in tx idle register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) val = (unsigned char)(info->idle_mode & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) /* standard 8 bit idle patterns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) switch(info->idle_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) case HDLC_TXIDLE_FLAGS: val = 0x7e; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) case HDLC_TXIDLE_ALT_ZEROS_ONES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) case HDLC_TXIDLE_ZEROS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) case HDLC_TXIDLE_SPACE: val = 0x00; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) default: val = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438)
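	/* TIR (tx idle register) holds the 8 bit idle pattern, or the LSB
	 * of a 16 bit custom idle pattern
	 */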
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) wr_reg8(info, TIR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) /*
 * get state of V.24 status (input) signals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) static void get_gtsignals(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) unsigned short status = rd_reg16(info, SSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) /* clear all serial signals except RTS and DTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) info->signals &= SerialSignal_RTS | SerialSignal_DTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451)
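	/* SSR bits 3..0 report the DSR, CTS, DCD and RI input states */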
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) if (status & BIT3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) info->signals |= SerialSignal_DSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) if (status & BIT2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) info->signals |= SerialSignal_CTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) if (status & BIT1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) info->signals |= SerialSignal_DCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) if (status & BIT0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) info->signals |= SerialSignal_RI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) * set V.24 Control Register based on current configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) static void msc_set_vcr(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) unsigned char val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) /* VCR (V.24 control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) * 07..04 serial IF select
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) * 03 DTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) * 02 RTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) * 01 LL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) * 00 RL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477)
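	/* example: RS-232 interface with DTR and RTS asserted gives
	 * val = BIT5 + BIT3 + BIT2 = 0x2c
	 */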
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) switch(info->if_mode & MGSL_INTERFACE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) case MGSL_INTERFACE_RS232:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) val |= BIT5; /* 0010 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) case MGSL_INTERFACE_V35:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) val |= BIT7 + BIT6 + BIT5; /* 1110 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) case MGSL_INTERFACE_RS422:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) val |= BIT6; /* 0100 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) if (info->if_mode & MGSL_INTERFACE_MSB_FIRST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) val |= BIT4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) if (info->signals & SerialSignal_DTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) val |= BIT3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) if (info->signals & SerialSignal_RTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) val |= BIT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) if (info->if_mode & MGSL_INTERFACE_LL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) val |= BIT1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) if (info->if_mode & MGSL_INTERFACE_RL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) val |= BIT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) wr_reg8(info, VCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) /*
 * set state of V.24 control (output) signals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) static void set_gtsignals(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) unsigned char val = rd_reg8(info, VCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) if (info->signals & SerialSignal_DTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) val |= BIT3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) val &= ~BIT3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) if (info->signals & SerialSignal_RTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) val |= BIT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) val &= ~BIT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) wr_reg8(info, VCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) * free range of receive DMA buffers (i to last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527)
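	/* walk the ring from i through last (inclusive), wrapping at
	 * rbuf_count, and reset each descriptor for reuse
	 */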
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) while(!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) /* reset current buffer for reuse */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) info->rbufs[i].status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) set_desc_count(info->rbufs[i], info->rbuf_fill_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) if (i == last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) if (++i == info->rbuf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) info->rbuf_current = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) * mark all receive DMA buffers as free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) static void reset_rbufs(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) free_rbufs(info, 0, info->rbuf_count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) info->rbuf_fill_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) info->rbuf_fill_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) * pass receive HDLC frame to upper layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) * return true if frame available, otherwise false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) static bool rx_get_frame(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) unsigned int start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) unsigned int framesize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) struct tty_struct *tty = info->port.tty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) unsigned char addr_field = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) unsigned int crc_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) switch (info->params.crc_type & HDLC_CRC_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) case HDLC_CRC_16_CCITT: crc_size = 2; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) case HDLC_CRC_32_CCITT: crc_size = 4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) check_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) framesize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) addr_field = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) start = end = info->rbuf_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575)
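	/* accumulate completed descriptors until one marked end of frame
	 * (eof) is found
	 */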
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) if (!desc_complete(info->rbufs[end]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) if (framesize == 0 && info->params.addr_filter != 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) addr_field = info->rbufs[end].buf[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) framesize += desc_count(info->rbufs[end]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) if (desc_eof(info->rbufs[end]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) if (++end == info->rbuf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590)
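		/* wrapped around without finding an end of frame:
		 * every descriptor is full, so restart the receiver
		 * (if enabled) and give up on this pass
		 */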
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) if (end == info->rbuf_current) {
			if (info->rx_enabled) {
				spin_lock_irqsave(&info->lock, flags);
				rx_start(info);
				spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) /* status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) * 15 buffer complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) * 14..06 reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) * 05..04 residue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) * 02 eof (end of frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) * 01 CRC error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) * 00 abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) status = desc_status(info->rbufs[end]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) /* ignore CRC bit if not using CRC (bit is undefined) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) if ((info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) status &= ~BIT1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615)
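	/* discard frames with no data or an address byte that does not
	 * match the configured address filter
	 */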
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) if (framesize == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) (addr_field != 0xff && addr_field != info->params.addr_filter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) free_rbufs(info, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) goto check_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621)
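	/* frames shorter than 2 bytes plus CRC, or aborted frames (BIT0),
	 * are counted as rxshort and dropped
	 */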
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) if (framesize < (2 + crc_size) || status & BIT0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) info->icount.rxshort++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) framesize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) } else if (status & BIT1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) info->icount.rxcrc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) if (!(info->params.crc_type & HDLC_CRC_RETURN_EX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) framesize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) #if SYNCLINK_GENERIC_HDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) if (framesize == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) info->netdev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) info->netdev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) DBGBH(("%s rx frame status=%04X size=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) info->device_name, status, framesize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, info->rbuf_fill_level), "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) if (framesize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) framesize -= crc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) crc_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) if (framesize > info->max_frame_size + crc_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) info->icount.rxlong++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) /* copy dma buffer(s) to contiguous temp buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) int copy_count = framesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) int i = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) unsigned char *p = info->tmp_rbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) info->tmp_rbuf_count = framesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) info->icount.rxok++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) while(copy_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) int partial_count = min_t(int, copy_count, info->rbuf_fill_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) memcpy(p, info->rbufs[i].buf, partial_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) p += partial_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) copy_count -= partial_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) if (++i == info->rbuf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) *p = (status & BIT1) ? RX_CRC_ERROR : RX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) framesize++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) #if SYNCLINK_GENERIC_HDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) if (info->netcount)
				hdlcdev_rx(info, info->tmp_rbuf, framesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) ldisc_receive_buf(tty, info->tmp_rbuf, info->flag_buf, framesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) free_rbufs(info, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) * pass receive buffer (RAW synchronous mode) to tty layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) * return true if buffer available, otherwise false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) static bool rx_get_buf(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) unsigned int i = info->rbuf_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) if (!desc_complete(info->rbufs[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) count = desc_count(info->rbufs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) switch(info->params.mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) case MGSL_MODE_MONOSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) case MGSL_MODE_BISYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) case MGSL_MODE_XSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) /* ignore residue in byte synchronous modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) if (desc_residue(info->rbufs[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) DBGDATA(info, info->rbufs[i].buf, count, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) DBGINFO(("rx_get_buf size=%d\n", count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) ldisc_receive_buf(info->port.tty, info->rbufs[i].buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) info->flag_buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) free_rbufs(info, i, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) static void reset_tbufs(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) info->tbuf_current = 0;
	for (i = 0; i < info->tbuf_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) info->tbufs[i].status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) info->tbufs[i].count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) * return number of free transmit DMA buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) static unsigned int free_tbuf_count(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) unsigned int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) unsigned int i = info->tbuf_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735)
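	/* count consecutive free descriptors (zero count) starting at
	 * tbuf_current, stopping at the first buffer still in use
	 */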
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) if (desc_count(info->tbufs[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) break; /* buffer in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) ++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) if (++i == info->tbuf_count)
			i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) } while (i != info->tbuf_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) /* if tx DMA active, last zero count buffer is in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) if (count && (rd_reg32(info, TDCSR) & BIT0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) --count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) * return number of bytes in unsent transmit DMA buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) * and the serial controller tx FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) static unsigned int tbuf_bytes(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) unsigned int total_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) unsigned int i = info->tbuf_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) unsigned int reg_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) unsigned int active_buf_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763)
	/*
	 * Add descriptor counts for all tx DMA buffers.
	 * If count is zero (cleared by the DMA controller after reading),
	 * the buffer is either complete or actively being read.
	 *
	 * Record the buf_count of the last zero-count buffer found while
	 * scanning from the current ring position. buf_count is a mirror
	 * copy of count and is not cleared by the serial controller.
	 * If the DMA controller is active, that buffer is actively being
	 * read, so add its buf_count to the total.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) count = desc_count(info->tbufs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) total_count += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) else if (!total_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) active_buf_count = info->tbufs[i].buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) if (++i == info->tbuf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) } while (i != info->tbuf_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) /* read tx DMA status register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) reg_value = rd_reg32(info, TDCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) /* if tx DMA active, last zero count buffer is in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) if (reg_value & BIT0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) total_count += active_buf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) /* add tx FIFO count = reg_value[15..8] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) total_count += (reg_value >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) /* if transmitter active add one byte for shift register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) if (info->tx_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) total_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) return total_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) * load data into transmit DMA buffer ring and start transmitter if needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) * return true if data accepted, otherwise false (buffers full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) unsigned short count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) struct slgt_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) /* check required buffer space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) DBGDATA(info, buf, size, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) * copy data to one or more DMA buffers in circular ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) * tbuf_start = first buffer for this data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) * tbuf_current = next free buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) * Copy all data before making it visible to the DMA controller by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) * setting the descriptor count of the first buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) * This prevents an active DMA controller from reading the first DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) * buffers of a frame and stopping before the final buffers are filled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) info->tbuf_start = i = info->tbuf_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) while (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) d = &info->tbufs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) memcpy(d->buf, buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) size -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) buf += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) * set EOF bit for last buffer of HDLC frame or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) * for every buffer in raw mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) if ((!size && info->params.mode == MGSL_MODE_HDLC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) info->params.mode == MGSL_MODE_RAW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) set_desc_eof(*d, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) set_desc_eof(*d, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) /* set descriptor count for all but first buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) if (i != info->tbuf_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) set_desc_count(*d, count);
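		/* buf_count mirrors count for tbuf_bytes() and is not cleared by the DMA controller */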
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) d->buf_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) if (++i == info->tbuf_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) info->tbuf_current = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) /* set first buffer count to make new data visible to DMA controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) d = &info->tbufs[info->tbuf_start];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) set_desc_count(*d, d->buf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) /* start transmitter if needed and update transmit timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) if (!info->tx_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) tx_start(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) update_tx_timer(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) }
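
/*
 * Illustrative sketch (not part of the driver): a typical tx_load() caller
 * queues data while holding info->lock, as loopback_test() does below, and
 * treats a false return as "DMA buffer ring full, retry later". The helper
 * name is hypothetical.
 */
#if 0
static bool example_queue_tx(struct slgt_info *info,
			     const char *data, unsigned int len)
{
	unsigned long flags;
	bool accepted;

	spin_lock_irqsave(&info->lock, flags);
	/* copies data into the ring and starts the transmitter if idle */
	accepted = tx_load(info, data, len);
	spin_unlock_irqrestore(&info->lock, flags);

	return accepted;
}
#endif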
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) static int register_test(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) static unsigned short patterns[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) {0x0000, 0xffff, 0xaaaa, 0x5555, 0x6969, 0x9696};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) static unsigned int count = ARRAY_SIZE(patterns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880)
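	/* write a test pattern pair to TIR and BDR and verify both read back intact */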
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) wr_reg16(info, TIR, patterns[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) wr_reg16(info, BDR, patterns[(i+1)%count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) if ((rd_reg16(info, TIR) != patterns[i]) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) (rd_reg16(info, BDR) != patterns[(i+1)%count])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) }
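	/* JCR BIT5 indicates whether the GPIO feature is present */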
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) info->init_error = rc ? DiagStatus_AddressFailure : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) static int irq_test(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) struct tty_struct *oldtty = info->port.tty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) u32 speed = info->params.data_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) info->params.data_rate = 921600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) info->port.tty = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) async_mode(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) slgt_irq_on(info, IRQ_TXIDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) /* enable transmitter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) wr_reg16(info, TCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) (unsigned short)(rd_reg16(info, TCR) | BIT1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) /* write one byte and wait for tx idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) wr_reg16(info, TDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) /* assume failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) info->init_error = DiagStatus_IrqFailure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) info->irq_occurred = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921)
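	/* poll up to ~1 second (100 x 10 ms) for the interrupt handler to set irq_occurred */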
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) timeout = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) while (timeout-- && !info->irq_occurred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) msleep_interruptible(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) reset_port(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) info->params.data_rate = speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) info->port.tty = oldtty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) info->init_error = info->irq_occurred ? 0 : DiagStatus_IrqFailure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) return info->irq_occurred ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) static int loopback_test_rx(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) unsigned char *src, *dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) if (desc_complete(info->rbufs[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) count = desc_count(info->rbufs[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) src = info->rbufs[0].buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) dest = info->tmp_rbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946)
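		/* walk data/status byte pairs, copying only bytes whose status bits (BIT9/BIT8) are clear */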
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) for ( ; count; count -= 2, src += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) /* src=data byte (src+1)=status byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) if (!(*(src+1) & (BIT9 + BIT8))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) *dest = *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) dest++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) info->tmp_rbuf_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) DBGDATA(info, info->tmp_rbuf, info->tmp_rbuf_count, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) static int loopback_test(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) #define TESTFRAMESIZE 20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) u16 count = TESTFRAMESIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) unsigned char buf[TESTFRAMESIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) int rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) struct tty_struct *oldtty = info->port.tty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) MGSL_PARAMS params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) memcpy(&params, &info->params, sizeof(params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) info->params.mode = MGSL_MODE_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) info->params.data_rate = 921600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) info->params.loopback = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) info->port.tty = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) /* build and send transmit frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) for (count = 0; count < TESTFRAMESIZE; ++count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) buf[count] = (unsigned char)count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984)
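	/* clear the capture buffer filled by loopback_test_rx() */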
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) info->tmp_rbuf_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) memset(info->tmp_rbuf, 0, TESTFRAMESIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) /* program hardware for async mode and enable the receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) async_mode(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) rx_start(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) tx_load(info, buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) /* wait for receive complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) for (timeout = 100; timeout; --timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) msleep_interruptible(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) if (loopback_test_rx(info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) /* verify received frame length and contents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) if (!rc && (info->tmp_rbuf_count != count ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) memcmp(buf, info->tmp_rbuf, count))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) reset_adapter(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) memcpy(&info->params, &params, sizeof(info->params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) info->port.tty = oldtty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) info->init_error = rc ? DiagStatus_DmaFailure : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) static int adapter_test(struct slgt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) DBGINFO(("testing %s\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) if (register_test(info) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) printk("register test failure %s addr=%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) info->device_name, info->phys_reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) } else if (irq_test(info) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) printk("IRQ test failure %s IRQ=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) info->device_name, info->irq_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) } else if (loopback_test(info) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) printk("loopback test failure %s\n", info->device_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) return info->init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) * transmit timeout handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) static void tx_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) struct slgt_info *info = from_timer(info, t, tx_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) DBGINFO(("%s tx_timeout\n", info->device_name));
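	/* a timed-out HDLC frame is counted in the transmit timeout statistics */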
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) if (info->tx_active && info->params.mode == MGSL_MODE_HDLC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) info->icount.txtimeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) tx_stop(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) spin_unlock_irqrestore(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) #if SYNCLINK_GENERIC_HDLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) if (info->netcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) hdlcdev_tx_done(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) bh_transmit(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) * receive buffer polling timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) static void rx_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) struct slgt_info *info = from_timer(info, t, rx_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) DBGINFO(("%s rx_timeout\n", info->device_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) spin_lock_irqsave(&info->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) info->pending_bh |= BH_RECEIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) spin_unlock_irqrestore(&info->lock, flags);
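	/* process the pending receive work right away by calling the bottom half handler directly */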
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) bh_handler(&info->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074)