^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/cdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/freezer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/splice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/virtio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/virtio_console.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "../tty/hvc/hvc_console.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * This is a global struct for storing common data for all the devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * this driver handles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * Mainly, it has a linked list for all the consoles in one place so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * that callbacks from hvc for get_chars(), put_chars() work properly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * across multiple devices and multiple ports per device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) struct ports_driver_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /* Used for registering chardevs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) struct class *class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) /* Used for exporting per-port information to debugfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) struct dentry *debugfs_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) /* List of all the devices we're handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) struct list_head portdevs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * This is used to keep track of the number of hvc consoles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * spawned by this driver. This number is given as the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * argument to hvc_alloc(). To correctly map an initial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * console spawned via hvc_instantiate to the console being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * hooked up via hvc_alloc, we need to pass the same vtermno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * We also just assume the first console being initialised was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * the first one that got used as the initial console.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) unsigned int next_vtermno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) /* All the console devices handled by this driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) struct list_head consoles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static DEFINE_SPINLOCK(pdrvdata_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static DECLARE_COMPLETION(early_console_added);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) /* This struct holds information that's relevant only for console ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct console {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) /* We'll place all consoles in a list in the pdrvdata struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) /* The hvc device associated with this console port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) struct hvc_struct *hvc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) /* The size of the console */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) struct winsize ws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * This number identifies the number that we used to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * with hvc in hvc_instantiate() and hvc_alloc(); this is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * number passed on by the hvc callbacks to us to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * differentiate between the other console ports handled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) u32 vtermno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) struct port_buffer {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) /* size of the buffer in *buf above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) /* used length of the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) /* offset in the buf from which to consume data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /* DMA address of buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) dma_addr_t dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) /* Device we got DMA memory from */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /* List of pending dma buffers to free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) /* If sgpages == 0 then buf is used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) unsigned int sgpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /* sg is used if spages > 0. sg must be the last in is struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) struct scatterlist sg[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * This is a per-device struct that stores data common to all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * ports for that device (vdev->priv).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) struct ports_device {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /* Next portdev in the list, head is in the pdrvdata struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * Workqueue handlers where we process deferred work after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * notification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) struct work_struct control_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) struct work_struct config_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct list_head ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) /* To protect the list of ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) spinlock_t ports_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /* To protect the vq operations for the control channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) spinlock_t c_ivq_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) spinlock_t c_ovq_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /* max. number of ports this device can hold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) u32 max_nr_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) /* The virtio device we're associated with */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) struct virtio_device *vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * A couple of virtqueues for the control channel: one for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * guest->host transfers, one for host->guest transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct virtqueue *c_ivq, *c_ovq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * A control packet buffer for guest->host requests, protected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) * by c_ovq_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) struct virtio_console_control cpkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) /* Array of per-port IO virtqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct virtqueue **in_vqs, **out_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) /* Major number for this device. Ports will be created as minors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) int chr_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) struct port_stats {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) unsigned long bytes_sent, bytes_received, bytes_discarded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) /* This struct holds the per-port data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) struct port {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) /* Next port in the list, head is in the ports_device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) /* Pointer to the parent virtio_console device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /* The current buffer from which data has to be fed to readers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) struct port_buffer *inbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) * To protect the operations on the in_vq associated with this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * port. Has to be a spinlock because it can be called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * interrupt context (get_char()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) spinlock_t inbuf_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) /* Protect the operations on the out_vq. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) spinlock_t outvq_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /* The IO vqs for this port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct virtqueue *in_vq, *out_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) /* File in the debugfs directory that exposes this port's information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) struct dentry *debugfs_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * Keep count of the bytes sent, received and discarded for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * this port for accounting and debugging purposes. These
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * counts are not reset across port open / close events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) struct port_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * The entries in this struct will be valid if this port is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * hooked up to an hvc console
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) struct console cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) /* Each port associates with a separate char device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) struct cdev *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) /* Reference-counting to handle port hot-unplugs and file operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) struct kref kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) /* A waitqueue for poll() or blocking read operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) wait_queue_head_t waitqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) /* The 'name' of the port that we expose via sysfs properties */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) /* We can notify apps of host connect / disconnect events via SIGIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) struct fasync_struct *async_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /* The 'id' to identify the port with the Host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) bool outvq_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) /* Is the host device open */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) bool host_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) /* We should allow only one process to open a port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) bool guest_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) /* This is the very early arch-specified put chars function. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static int (*early_put_chars)(u32, const char *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) static struct port *find_port_by_vtermno(u32 vtermno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) struct console *cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) spin_lock_irqsave(&pdrvdata_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) list_for_each_entry(cons, &pdrvdata.consoles, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) if (cons->vtermno == vtermno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) port = container_of(cons, struct port, cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) spin_unlock_irqrestore(&pdrvdata_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) dev_t dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) spin_lock_irqsave(&portdev->ports_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) list_for_each_entry(port, &portdev->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) if (port->cdev->dev == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) kref_get(&port->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) spin_unlock_irqrestore(&portdev->ports_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) static struct port *find_port_by_devt(dev_t dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) spin_lock_irqsave(&pdrvdata_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) port = find_port_by_devt_in_portdev(portdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) if (port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) spin_unlock_irqrestore(&pdrvdata_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) spin_lock_irqsave(&portdev->ports_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) list_for_each_entry(port, &portdev->ports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) if (port->id == id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) spin_unlock_irqrestore(&portdev->ports_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) static struct port *find_port_by_vq(struct ports_device *portdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) spin_lock_irqsave(&portdev->ports_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) list_for_each_entry(port, &portdev->ports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) if (port->in_vq == vq || port->out_vq == vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) spin_unlock_irqrestore(&portdev->ports_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) static bool is_console_port(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) if (port->cons.hvc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) static bool is_rproc_serial(const struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) static inline bool use_multiport(struct ports_device *portdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) * This condition can be true when put_chars is called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * early_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) if (!portdev->vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) static DEFINE_SPINLOCK(dma_bufs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) static LIST_HEAD(pending_free_dma_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) static void free_buf(struct port_buffer *buf, bool can_sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) for (i = 0; i < buf->sgpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) struct page *page = sg_page(&buf->sg[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if (!buf->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) kfree(buf->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) } else if (is_rproc_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) /* dma_free_coherent requires interrupts to be enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (!can_sleep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) /* queue up dma-buffers to be freed later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) spin_lock_irqsave(&dma_bufs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) list_add_tail(&buf->list, &pending_free_dma_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) spin_unlock_irqrestore(&dma_bufs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) /* Release device refcnt and allow it to be freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) put_device(buf->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) static void reclaim_dma_bufs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) struct port_buffer *buf, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) LIST_HEAD(tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) if (list_empty(&pending_free_dma_bufs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) /* Create a copy of the pending_free_dma_bufs while holding the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) spin_lock_irqsave(&dma_bufs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) list_cut_position(&tmp_list, &pending_free_dma_bufs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) pending_free_dma_bufs.prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) spin_unlock_irqrestore(&dma_bufs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) /* Release the dma buffers, without irqs enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) list_del(&buf->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) free_buf(buf, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) int pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) reclaim_dma_bufs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) * Allocate buffer and the sg list. The sg list array is allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) * directly after the port_buffer struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) buf = kmalloc(struct_size(buf, sg, pages), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) buf->sgpages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) if (pages > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) buf->dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) buf->buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (is_rproc_serial(vdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) * Allocate DMA memory from ancestor. When a virtio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) * device is created by remoteproc, the DMA memory is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * associated with the parent device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * virtioY => remoteprocX#vdevYbuffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) buf->dev = vdev->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) if (!buf->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) goto free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) /* Increase device refcnt to avoid freeing it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) get_device(buf->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) buf->dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) buf->buf = kmalloc(buf_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (!buf->buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) goto free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) buf->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) buf->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) buf->size = buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) free_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) /* Callers should take appropriate locks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) static struct port_buffer *get_inbuf(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) if (port->inbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) return port->inbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) buf = virtqueue_get_buf(port->in_vq, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) if (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) buf->len = min_t(size_t, len, buf->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) buf->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) port->stats.bytes_received += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * Create a scatter-gather list representing our input buffer and put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * it in the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * Callers should take appropriate locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) struct scatterlist sg[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) sg_init_one(sg, buf->buf, buf->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) virtqueue_kick(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) ret = vq->num_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /* Discard any unread data this port has. Callers lockers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) static void discard_port_data(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) unsigned int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (!port->portdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) /* Device has been unplugged. vqs are already gone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) buf = get_inbuf(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) while (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) port->stats.bytes_discarded += buf->len - buf->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) if (add_inbuf(port->in_vq, buf) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) err++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) free_buf(buf, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) port->inbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) buf = get_inbuf(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) static bool port_has_data(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) spin_lock_irqsave(&port->inbuf_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) port->inbuf = get_inbuf(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) if (port->inbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) spin_unlock_irqrestore(&port->inbuf_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) unsigned int event, unsigned int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) struct scatterlist sg[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) struct virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (!use_multiport(portdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) vq = portdev->c_ovq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) spin_lock(&portdev->c_ovq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) virtqueue_kick(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) while (!virtqueue_get_buf(vq, &len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) && !virtqueue_is_broken(vq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) spin_unlock(&portdev->c_ovq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) static ssize_t send_control_msg(struct port *port, unsigned int event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) unsigned int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) /* Did the port get unplugged before userspace closed it? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (port->portdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) return __send_control_msg(port->portdev, port->id, event, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) /* Callers must take the port->outvq_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) static void reclaim_consumed_buffers(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) if (!port->portdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) /* Device has been unplugged. vqs are already gone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) free_buf(buf, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) port->outvq_full = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) int nents, size_t in_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) void *data, bool nonblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) struct virtqueue *out_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) out_vq = port->out_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) spin_lock_irqsave(&port->outvq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) reclaim_consumed_buffers(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) /* Tell Host to go! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) virtqueue_kick(out_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) in_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (out_vq->num_free == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) port->outvq_full = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) if (nonblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * Wait till the host acknowledges it pushed out the data we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * sent. This is done for data from the hvc_console; the tty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * operations are performed with spinlocks held so we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * sleep here. An alternative would be to copy the data to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * buffer and relax the spinning requirement. The downside is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * we need to kmalloc a GFP_ATOMIC buffer each time the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * console driver writes something out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) while (!virtqueue_get_buf(out_vq, &len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) && !virtqueue_is_broken(out_vq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) spin_unlock_irqrestore(&port->outvq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) port->stats.bytes_sent += in_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) * We're expected to return the amount of data we wrote -- all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * of it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) return in_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * Give out the data that's requested from the buffer that we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * queued up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) size_t out_count, bool to_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (!out_count || !port_has_data(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) buf = port->inbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) out_count = min(out_count, buf->len - buf->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (to_user) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) memcpy((__force char *)out_buf, buf->buf + buf->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) out_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) buf->offset += out_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (buf->offset == buf->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * We're done using all the data in this buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * Re-queue so that the Host can send us more data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) spin_lock_irqsave(&port->inbuf_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) port->inbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (add_inbuf(port->in_vq, buf) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) dev_warn(port->dev, "failed add_buf\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) spin_unlock_irqrestore(&port->inbuf_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /* Return the number of bytes actually copied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) return out_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) /* The condition that must be true for polling to end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) static bool will_read_block(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (!port->guest_connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /* Port got hot-unplugged. Let's exit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return !port_has_data(port) && port->host_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) static bool will_write_block(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (!port->guest_connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /* Port got hot-unplugged. Let's exit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (!port->host_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) spin_lock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * Check if the Host has consumed any buffers since we last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * sent data (this is only applicable for nonblocking ports).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) reclaim_consumed_buffers(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) ret = port->outvq_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) spin_unlock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) size_t count, loff_t *offp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) port = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) /* Port is hot-unplugged. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (!port->guest_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (!port_has_data(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * If nothing's connected on the host just return 0 in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * case of list_empty; this tells the userspace app
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * that there's no connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (!port->host_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (filp->f_flags & O_NONBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) ret = wait_event_freezable(port->waitqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) !will_read_block(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* Port got hot-unplugged while we were waiting above. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (!port->guest_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * We could've received a disconnection message while we were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * waiting for more data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * This check is not clubbed in the if() statement above as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * might receive some data as well as the host could get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * disconnected after we got woken up from our wait. So we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * really want to give off whatever data we have and only then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * check for host_connected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (!port_has_data(port) && !port->host_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return fill_readbuf(port, ubuf, count, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static int wait_port_writable(struct port *port, bool nonblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (will_write_block(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (nonblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ret = wait_event_freezable(port->waitqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) !will_write_block(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /* Port got hot-unplugged. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (!port->guest_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) size_t count, loff_t *offp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) bool nonblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct scatterlist sg[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /* Userspace could be out to fool us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) port = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) nonblock = filp->f_flags & O_NONBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ret = wait_port_writable(port, nonblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) count = min((size_t)(32 * 1024), count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) buf = alloc_buf(port->portdev->vdev, count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ret = copy_from_user(buf->buf, ubuf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * We now ask send_buf() to not spin for generic ports -- we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * can re-use the same code path that non-blocking file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * descriptors take for blocking file descriptors since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * wait is already done and we're certain the write will go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * through to the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) nonblock = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) sg_init_one(sg, buf->buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) ret = __send_to_port(port, sg, 1, count, buf, nonblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (nonblock && ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) free_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) free_buf(buf, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct sg_list {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) unsigned int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct splice_desc *sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct sg_list *sgl = sd->u.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) unsigned int offset, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (sgl->n == sgl->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Try lock this page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (pipe_buf_try_steal(pipe, buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* Get reference and unlock page for moving */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) get_page(buf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) unlock_page(buf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) len = min(buf->len, sd->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* Failback to copying a page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct page *page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) char *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) offset = sd->pos & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) len = sd->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (len + offset > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) len = PAGE_SIZE - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) src = kmap_atomic(buf->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) memcpy(page_address(page) + offset, src + buf->offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) sgl->n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) sgl->len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* Faster zero-copy write by splicing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct file *filp, loff_t *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) size_t len, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct port *port = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct sg_list sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct splice_desc sd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) .total_len = len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) .flags = flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) .pos = *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) .u.data = &sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) unsigned int occupancy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * Rproc_serial does not yet support splice. To support splice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * pipe_to_sg() must allocate dma-buffers and copy content from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * regular pages to dma pages. And alloc_buf and free_buf must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * support allocating and freeing such a list of dma-buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (is_rproc_serial(port->out_vq->vdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) pipe_lock(pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (pipe_empty(pipe->head, pipe->tail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) occupancy = pipe_occupancy(pipe->head, pipe->tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) buf = alloc_buf(port->portdev->vdev, 0, occupancy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) sgl.n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) sgl.len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) sgl.size = occupancy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) sgl.sg = buf->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) sg_init_table(sgl.sg, sgl.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) pipe_unlock(pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (likely(ret > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (unlikely(ret <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) free_buf(buf, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) error_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) pipe_unlock(pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static __poll_t port_fops_poll(struct file *filp, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) __poll_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) port = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) poll_wait(filp, &port->waitqueue, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (!port->guest_connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /* Port got unplugged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return EPOLLHUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (!will_read_block(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ret |= EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (!will_write_block(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ret |= EPOLLOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (!port->host_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ret |= EPOLLHUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static void remove_port(struct kref *kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static int port_fops_release(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) port = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* Notify host of port being closed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) spin_lock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) port->guest_connected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) discard_port_data(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) spin_unlock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) spin_lock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) reclaim_consumed_buffers(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) spin_unlock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) reclaim_dma_bufs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * Locks aren't necessary here as a port can't be opened after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * unplug, and if a port isn't unplugged, a kref would already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * exist for the port. Plus, taking ports_lock here would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * create a dependency on other locks taken by functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * inside remove_port if we're the last holder of the port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * creating many problems.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) kref_put(&port->kref, remove_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static int port_fops_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct cdev *cdev = inode->i_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* We get the port with a kref here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) port = find_port_by_devt(cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /* Port was unplugged before we could proceed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) filp->private_data = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * Don't allow opening of console port devices -- that's done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * via /dev/hvc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (is_console_port(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* Allow only one process to open a particular port at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) spin_lock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (port->guest_connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) spin_unlock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) port->guest_connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) spin_unlock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) spin_lock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * There might be a chance that we missed reclaiming a few
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * buffers in the window of the port getting previously closed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * and opening now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) reclaim_consumed_buffers(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) spin_unlock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) nonseekable_open(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* Notify host of port being opened */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) kref_put(&port->kref, remove_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static int port_fops_fasync(int fd, struct file *filp, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) port = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return fasync_helper(fd, filp, mode, &port->async_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
/*
 * The file operations that we support: programs in the guest can open
 * a console device, read from it, write to it, poll for data and
 * close it.  The devices are at
 * /dev/vport<device number>p<port number>
 *
 * splice_write lets pipe contents be fed to a port without bouncing
 * through an intermediate user-space buffer; llseek is disabled since
 * a port is a stream, not a seekable file.
 */
static const struct file_operations port_fops = {
	.owner = THIS_MODULE,
	.open = port_fops_open,
	.read = port_fops_read,
	.write = port_fops_write,
	.splice_write = port_fops_splice_write,
	.poll = port_fops_poll,
	.release = port_fops_release,
	.fasync = port_fops_fasync,
	.llseek = no_llseek,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
/*
 * The put_chars() callback is pretty straightforward.
 *
 * We turn the characters into a scatter-gather list, add it to the
 * output queue and then kick the Host.  Then we sit here waiting for
 * it to finish: inefficient in theory, but in practice
 * implementations will do it immediately.
 */
static int put_chars(u32 vtermno, const char *buf, int count)
{
	struct port *port;
	struct scatterlist sg[1];
	void *data;
	int ret;

	/* Before any console port exists, hand output to the early hook. */
	if (unlikely(early_put_chars))
		return early_put_chars(vtermno, buf, count);

	port = find_port_by_vtermno(vtermno);
	if (!port)
		return -EPIPE;

	/*
	 * Copy to a heap buffer before building the scatterlist:
	 * presumably 'buf' may live on the caller's stack, which is not
	 * safe to hand to the virtqueue (TODO confirm against hvc
	 * callers).  GFP_ATOMIC: hedged assumption that this path can
	 * run in atomic console context.
	 */
	data = kmemdup(buf, count, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	sg_init_one(sg, data, count);
	ret = __send_to_port(port, sg, 1, count, data, false);
	/* __send_to_port() is done with the buffer by the time it returns. */
	kfree(data);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * get_chars() is the callback from the hvc_console infrastructure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * when an interrupt is received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * We call out to fill_readbuf that gets us the required data from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * buffers that are queued up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static int get_chars(u32 vtermno, char *buf, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /* If we've not set up the port yet, we have no input to give. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (unlikely(early_put_chars))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) port = find_port_by_vtermno(vtermno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (!port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* If we don't have an input queue yet, we can't get input. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) BUG_ON(!port->in_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return fill_readbuf(port, (__force char __user *)buf, count, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static void resize_console(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct virtio_device *vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /* The port could have been hot-unplugged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!port || !is_console_port(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) vdev = port->portdev->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* Don't test F_SIZE at all if we're rproc: not a valid feature! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (!is_rproc_serial(vdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) hvc_resize(port->cons.hvc, port->cons.ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /* We set the configuration at this point, since we now have a tty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static int notifier_add_vio(struct hvc_struct *hp, int data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) port = find_port_by_vtermno(hp->vtermno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (!port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) hp->irq_requested = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) resize_console(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
/* hvc notifier_del/notifier_hangup hook: mark the notifier torn down. */
static void notifier_del_vio(struct hvc_struct *hp, int data)
{
	hp->irq_requested = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
/* The operations for console ports. */
static const struct hv_ops hv_ops = {
	.get_chars = get_chars,
	.put_chars = put_chars,
	.notifier_add = notifier_add_vio,
	/* Hangup undoes exactly what notifier_del does, so share the hook. */
	.notifier_del = notifier_del_vio,
	.notifier_hangup = notifier_del_vio,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
/*
 * Console drivers are initialized very early so boot messages can go
 * out, so we do things slightly differently from the generic virtio
 * initialization of the net and block drivers.
 *
 * At this stage, the console is output-only.  It's too early to set
 * up a virtqueue, so we let the drivers do some boutique early-output
 * thing.
 *
 * The caller-supplied put_chars hook is stashed in early_put_chars and
 * used until the first real console port comes up, at which point it
 * is cleared (see init_port_console()).
 */
int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
{
	early_put_chars = put_chars;
	return hvc_instantiate(0, 0, &hv_ops);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
/*
 * Hook a port up as an hvc console.  Returns 0 on success or the
 * negative error from hvc_alloc() on failure.
 */
static int init_port_console(struct port *port)
{
	int ret;

	/*
	 * The Host's telling us this port is a console port.  Hook it
	 * up with an hvc console.
	 *
	 * To set up and manage our virtual console, we call
	 * hvc_alloc().
	 *
	 * The first argument of hvc_alloc() is the virtual console
	 * number.  The second argument is the parameter for the
	 * notification mechanism (like irq number).  We currently
	 * leave this as zero, virtqueues have implicit notifications.
	 *
	 * The third argument is a "struct hv_ops" containing the
	 * put_chars() get_chars(), notifier_add() and notifier_del()
	 * pointers.  The final argument is the output buffer size: we
	 * can do any size, so we put PAGE_SIZE here.
	 */
	port->cons.vtermno = pdrvdata.next_vtermno;

	port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
	if (IS_ERR(port->cons.hvc)) {
		ret = PTR_ERR(port->cons.hvc);
		dev_err(port->dev,
			"error %d allocating hvc for port\n", ret);
		/* Clear the stale ERR_PTR so later code sees "no console". */
		port->cons.hvc = NULL;
		return ret;
	}
	/* Claim the vtermno and publish the console under pdrvdata_lock. */
	spin_lock_irq(&pdrvdata_lock);
	pdrvdata.next_vtermno++;
	list_add_tail(&port->cons.list, &pdrvdata.consoles);
	spin_unlock_irq(&pdrvdata_lock);
	/* Console ports count as always guest-connected. */
	port->guest_connected = true;

	/*
	 * Start using the new console output if this is the first
	 * console to come up.
	 */
	if (early_put_chars)
		early_put_chars = NULL;

	/* Notify host of port being opened */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static ssize_t show_port_name(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) struct device_attribute *attr, char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) port = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return sprintf(buffer, "%s\n", port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
/* sysfs attributes exported on every port device. */
static struct attribute *port_sysfs_entries[] = {
	&dev_attr_name.attr,
	NULL
};

static const struct attribute_group port_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = port_sysfs_entries,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
/*
 * debugfs dump of per-port state: connection flags, traffic counters
 * and console identity.  The output format is user-visible; keep the
 * field names stable.
 */
static int port_debugfs_show(struct seq_file *s, void *data)
{
	struct port *port = s->private;

	/* port->name may still be NULL if the host never named the port. */
	seq_printf(s, "name: %s\n", port->name ? port->name : "");
	seq_printf(s, "guest_connected: %d\n", port->guest_connected);
	seq_printf(s, "host_connected: %d\n", port->host_connected);
	seq_printf(s, "outvq_full: %d\n", port->outvq_full);
	seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
	seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
	seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
	seq_printf(s, "is_console: %s\n",
		   is_console_port(port) ? "yes" : "no");
	seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(port_debugfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static void set_console_size(struct port *port, u16 rows, u16 cols)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (!port || !is_console_port(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) port->cons.ws.ws_row = rows;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) port->cons.ws.ws_col = cols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) int nr_added_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) nr_added_bufs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) spin_lock_irq(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ret = add_inbuf(vq, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) spin_unlock_irq(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) free_buf(buf, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) nr_added_bufs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) spin_unlock_irq(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) } while (ret > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return nr_added_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static void send_sigio_to_port(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (port->async_queue && port->guest_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) static int add_port(struct ports_device *portdev, u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) char debugfs_name[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) dev_t devt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) port = kmalloc(sizeof(*port), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (!port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) kref_init(&port->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) port->portdev = portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) port->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) port->name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) port->inbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) port->cons.hvc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) port->async_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) port->cons.vtermno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) port->host_connected = port->guest_connected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) port->stats = (struct port_stats) { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) port->outvq_full = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) port->in_vq = portdev->in_vqs[port->id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) port->out_vq = portdev->out_vqs[port->id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) port->cdev = cdev_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (!port->cdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) goto free_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) port->cdev->ops = &port_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) devt = MKDEV(portdev->chr_major, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) err = cdev_add(port->cdev, devt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) dev_err(&port->portdev->vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) "Error %d adding cdev for port %u\n", err, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) goto free_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) devt, port, "vport%up%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) port->portdev->vdev->index, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (IS_ERR(port->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) err = PTR_ERR(port->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) dev_err(&port->portdev->vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) "Error %d creating device for port %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) err, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) goto free_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) spin_lock_init(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) spin_lock_init(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) init_waitqueue_head(&port->waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /* We can safely ignore ENOSPC because it means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * the queue already has buffers. Buffers are removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * only by virtcons_remove(), not by unplug_port()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) err = fill_queue(port->in_vq, &port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (err < 0 && err != -ENOSPC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) dev_err(port->dev, "Error allocating inbufs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) goto free_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (is_rproc_serial(port->portdev->vdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * For rproc_serial assume remote processor is connected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * rproc_serial does not want the console port, only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * the generic port implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) port->host_connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) else if (!use_multiport(port->portdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * If we're not using multiport support,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * this has to be a console port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) err = init_port_console(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) goto free_inbufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) spin_lock_irq(&portdev->ports_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) list_add_tail(&port->list, &port->portdev->ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) spin_unlock_irq(&portdev->ports_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * Tell the Host we're set so that it can send us various
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * configuration parameters for this port (eg, port name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * caching, whether this is a console port, etc.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (pdrvdata.debugfs_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * Finally, create the debugfs file that we can use to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * inspect a port's state at any time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) port->portdev->vdev->index, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) pdrvdata.debugfs_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) &port_debugfs_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) free_inbufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) free_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) device_destroy(pdrvdata.class, port->dev->devt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) free_cdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) cdev_del(port->cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) free_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) kfree(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /* The host might want to notify management sw about port add failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /* No users remain, remove all port-specific data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static void remove_port(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) port = container_of(kref, struct port, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) kfree(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static void remove_port_data(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) spin_lock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /* Remove unused data this port might have received. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) discard_port_data(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) spin_unlock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) spin_lock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) reclaim_consumed_buffers(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) spin_unlock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * Port got unplugged. Remove port from portdev's list and drop the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * kref reference. If no userspace has this port opened, it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * result in immediate removal the port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static void unplug_port(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) spin_lock_irq(&port->portdev->ports_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) list_del(&port->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) spin_unlock_irq(&port->portdev->ports_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) spin_lock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (port->guest_connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) /* Let the app know the port is going down. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) send_sigio_to_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) /* Do this after sigio is actually sent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) port->guest_connected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) port->host_connected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) wake_up_interruptible(&port->waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) spin_unlock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (is_console_port(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) spin_lock_irq(&pdrvdata_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) list_del(&port->cons.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) spin_unlock_irq(&pdrvdata_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) hvc_remove(port->cons.hvc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) remove_port_data(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * We should just assume the device itself has gone off --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * else a close on an open port later will try to send out a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * control message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) port->portdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) device_destroy(pdrvdata.class, port->dev->devt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) cdev_del(port->cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) debugfs_remove(port->debugfs_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) kfree(port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * Locks around here are not necessary - a port can't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * opened after we removed the port struct from ports_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) kref_put(&port->kref, remove_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /* Any private messages that the Host and Guest want to share */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) static void handle_control_message(struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) struct ports_device *portdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) struct port_buffer *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) struct virtio_console_control *cpkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) size_t name_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (!port &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) /* No valid header at start of buffer. Drop it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) dev_dbg(&portdev->vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) "Invalid index %u in control packet\n", cpkt->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) switch (virtio16_to_cpu(vdev, cpkt->event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) case VIRTIO_CONSOLE_PORT_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) dev_dbg(&portdev->vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) "Port %u already added\n", port->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (virtio32_to_cpu(vdev, cpkt->id) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) portdev->max_nr_ports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) dev_warn(&portdev->vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) "Request for adding port with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) "out-of-bound id %u, max. supported id: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) cpkt->id, portdev->max_nr_ports - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) case VIRTIO_CONSOLE_PORT_REMOVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) unplug_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) case VIRTIO_CONSOLE_CONSOLE_PORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (!cpkt->value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (is_console_port(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) init_port_console(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) complete(&early_console_added);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) * Could remove the port here in case init fails - but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * have to notify the host first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) case VIRTIO_CONSOLE_RESIZE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) __u16 rows;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) __u16 cols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) } size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (!is_console_port(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) sizeof(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) set_console_size(port, size.rows, size.cols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) port->cons.hvc->irq_requested = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) resize_console(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) case VIRTIO_CONSOLE_PORT_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) port->host_connected = virtio16_to_cpu(vdev, cpkt->value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) wake_up_interruptible(&port->waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * If the host port got closed and the host had any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * unconsumed buffers, we'll be able to reclaim them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) spin_lock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) reclaim_consumed_buffers(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) spin_unlock_irq(&port->outvq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * If the guest is connected, it'll be interested in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * knowing the host connection state changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) spin_lock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) send_sigio_to_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) spin_unlock_irq(&port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) case VIRTIO_CONSOLE_PORT_NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * If we woke up after hibernation, we can get this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * again. Skip it in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (port->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * Skip the size of the header and the cpkt to get the size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * of the name that was sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) port->name = kmalloc(name_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (!port->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) dev_err(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) "Not enough space to store port name\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) name_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) port->name[name_size - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * Since we only have one sysfs attribute, 'name',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * create it only if we have a name for the port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) err = sysfs_create_group(&port->dev->kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) &port_attribute_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) dev_err(port->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) "Error %d creating sysfs device attributes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * Generate a udev event so that appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * symlinks can be created based on udev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * rules.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) static void control_work_handler(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) struct virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) portdev = container_of(work, struct ports_device, control_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) vq = portdev->c_ivq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) spin_lock(&portdev->c_ivq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) while ((buf = virtqueue_get_buf(vq, &len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) spin_unlock(&portdev->c_ivq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) buf->len = min_t(size_t, len, buf->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) buf->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) handle_control_message(vq->vdev, portdev, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) spin_lock(&portdev->c_ivq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (add_inbuf(portdev->c_ivq, buf) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) dev_warn(&portdev->vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) "Error adding buffer to queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) free_buf(buf, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) spin_unlock(&portdev->c_ivq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static void flush_bufs(struct virtqueue *vq, bool can_sleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) while ((buf = virtqueue_get_buf(vq, &len)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) free_buf(buf, can_sleep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static void out_intr(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) port = find_port_by_vq(vq->vdev->priv, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (!port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) flush_bufs(vq, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) wake_up_interruptible(&port->waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) static void in_intr(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) port = find_port_by_vq(vq->vdev->priv, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (!port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) flush_bufs(vq, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) spin_lock_irqsave(&port->inbuf_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) port->inbuf = get_inbuf(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * Normally the port should not accept data when the port is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * closed. For generic serial ports, the host won't (shouldn't)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) * send data till the guest is connected. But this condition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * can be reached when a console port is not yet connected (no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * tty is spawned) and the other side sends out data over the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) * vring, or when a remote devices start sending data before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) * the ports are opened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * A generic serial port will discard data if not connected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * while console ports and rproc-serial ports accepts data at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * any time. rproc-serial is initiated with guest_connected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * false because port_fops_open expects this. Console ports are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * hooked up with an HVC console and is initialized with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * guest_connected to true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) discard_port_data(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) /* Send a SIGIO indicating new data in case the process asked for it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) send_sigio_to_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) spin_unlock_irqrestore(&port->inbuf_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) wake_up_interruptible(&port->waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (is_console_port(port) && hvc_poll(port->cons.hvc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) hvc_kick();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) static void control_intr(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) portdev = vq->vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) schedule_work(&portdev->control_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) static void config_intr(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) portdev = vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (!use_multiport(portdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) schedule_work(&portdev->config_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) static void config_work_handler(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) portdev = container_of(work, struct ports_device, config_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (!use_multiport(portdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) struct virtio_device *vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) u16 rows, cols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) vdev = portdev->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) virtio_cread(vdev, struct virtio_console_config, cols, &cols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) virtio_cread(vdev, struct virtio_console_config, rows, &rows);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) port = find_port_by_id(portdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) set_console_size(port, rows, cols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * We'll use this way of resizing only for legacy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * support. For newer userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * to indicate console size changes so that it can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * done per-port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) resize_console(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) static int init_vqs(struct ports_device *portdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) vq_callback_t **io_callbacks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) char **io_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) struct virtqueue **vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) u32 i, j, nr_ports, nr_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) nr_ports = portdev->max_nr_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) !portdev->out_vqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * For backward compat (newer host but older guest), the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * spawns a console port first and also inits the vqs for port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * 0 before others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) io_callbacks[j] = in_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) io_callbacks[j + 1] = out_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) io_names[j] = "input";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) io_names[j + 1] = "output";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) j += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (use_multiport(portdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) io_callbacks[j] = control_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) io_callbacks[j + 1] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) io_names[j] = "control-i";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) io_names[j + 1] = "control-o";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) for (i = 1; i < nr_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) j += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) io_callbacks[j] = in_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) io_callbacks[j + 1] = out_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) io_names[j] = "input";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) io_names[j + 1] = "output";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /* Find the queues. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) err = virtio_find_vqs(portdev->vdev, nr_queues, vqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) io_callbacks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) (const char **)io_names, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) portdev->in_vqs[0] = vqs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) portdev->out_vqs[0] = vqs[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) j += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) if (use_multiport(portdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) portdev->c_ivq = vqs[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) portdev->c_ovq = vqs[j + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) for (i = 1; i < nr_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) j += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) portdev->in_vqs[i] = vqs[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) portdev->out_vqs[i] = vqs[j + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) kfree(io_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) kfree(io_callbacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) kfree(vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) kfree(portdev->out_vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) kfree(portdev->in_vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) kfree(io_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) kfree(io_callbacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) kfree(vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) static const struct file_operations portdev_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) static void remove_vqs(struct ports_device *portdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) virtio_device_for_each_vq(portdev->vdev, vq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) struct port_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) flush_bufs(vq, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) while ((buf = virtqueue_detach_unused_buf(vq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) free_buf(buf, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) portdev->vdev->config->del_vqs(portdev->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) kfree(portdev->in_vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) kfree(portdev->out_vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) static void virtcons_remove(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) struct port *port, *port2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) portdev = vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) spin_lock_irq(&pdrvdata_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) list_del(&portdev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) spin_unlock_irq(&pdrvdata_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) /* Device is going away, exit any polling for buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) virtio_break_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (use_multiport(portdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) flush_work(&portdev->control_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) flush_work(&portdev->config_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) /* Disable interrupts for vqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) vdev->config->reset(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /* Finish up work that's lined up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (use_multiport(portdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) cancel_work_sync(&portdev->control_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) cancel_work_sync(&portdev->config_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) list_for_each_entry_safe(port, port2, &portdev->ports, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) unplug_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) unregister_chrdev(portdev->chr_major, "virtio-portsdev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * When yanking out a device, we immediately lose the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * (device-side) queues. So there's no point in keeping the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * guest side around till we drop our final reference. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * also means that any ports which are in an open state will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * have to just stop using the port, as the vqs are going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) remove_vqs(portdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) kfree(portdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * Once we're further in boot, we get probed like any other virtio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * If the host also supports multiple console ports, we check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) * config space to see how many ports the host has spawned. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * initialize each port found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) static int virtcons_probe(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) bool multiport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) bool early = early_put_chars != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) /* We only need a config space if features are offered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (!vdev->config->get &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) dev_err(&vdev->dev, "%s failure: config access disabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) /* Ensure to read early_put_chars now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (!portdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) /* Attach this portdev to this virtio_device, and vice-versa. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) portdev->vdev = vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) vdev->priv = portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) portdev->chr_major = register_chrdev(0, "virtio-portsdev",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) &portdev_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (portdev->chr_major < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) dev_err(&vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) "Error %d registering chrdev for device %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) portdev->chr_major, vdev->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) err = portdev->chr_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) multiport = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) portdev->max_nr_ports = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (!is_rproc_serial(vdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) struct virtio_console_config, max_nr_ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) &portdev->max_nr_ports) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) multiport = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) err = init_vqs(portdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) goto free_chrdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) spin_lock_init(&portdev->ports_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) INIT_LIST_HEAD(&portdev->ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) INIT_LIST_HEAD(&portdev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) virtio_device_ready(portdev->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) INIT_WORK(&portdev->config_work, &config_work_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) INIT_WORK(&portdev->control_work, &control_work_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (multiport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) spin_lock_init(&portdev->c_ivq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) spin_lock_init(&portdev->c_ovq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) dev_err(&vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) "Error allocating buffers for control queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) * The host might want to notify mgmt sw about device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) * add failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) VIRTIO_CONSOLE_DEVICE_READY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) /* Device was functional: we need full cleanup. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) virtcons_remove(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) * For backward compatibility: Create a console port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) * if we're running on older host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) add_port(portdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) spin_lock_irq(&pdrvdata_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) list_add_tail(&portdev->list, &pdrvdata.portdevs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) spin_unlock_irq(&pdrvdata_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) VIRTIO_CONSOLE_DEVICE_READY, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) * If there was an early virtio console, assume that there are no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) * other consoles. We need to wait until the hvc_alloc matches the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * hvc_instantiate, otherwise tty_open will complain, resulting in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * a "Warning: unable to open an initial console" boot failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * Without multiport this is done in add_port above. With multiport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * this might take some host<->guest communication - thus we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (multiport && early)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) wait_for_completion(&early_console_added);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) free_chrdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) unregister_chrdev(portdev->chr_major, "virtio-portsdev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) kfree(portdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static const struct virtio_device_id id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) { 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) MODULE_DEVICE_TABLE(virtio, id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) static const unsigned int features[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) VIRTIO_CONSOLE_F_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) VIRTIO_CONSOLE_F_MULTIPORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) static const struct virtio_device_id rproc_serial_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) #if IS_ENABLED(CONFIG_REMOTEPROC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) { 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) static const unsigned int rproc_serial_features[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) static int virtcons_freeze(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) portdev = vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) vdev->config->reset(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (use_multiport(portdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) virtqueue_disable_cb(portdev->c_ivq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) cancel_work_sync(&portdev->control_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) cancel_work_sync(&portdev->config_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) * Once more: if control_work_handler() was running, it would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * enable the cb as the last step.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (use_multiport(portdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) virtqueue_disable_cb(portdev->c_ivq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) list_for_each_entry(port, &portdev->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) virtqueue_disable_cb(port->in_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) virtqueue_disable_cb(port->out_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) * We'll ask the host later if the new invocation has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * the port opened or closed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) port->host_connected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) remove_port_data(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) remove_vqs(portdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) static int virtcons_restore(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) struct ports_device *portdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) struct port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) portdev = vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) ret = init_vqs(portdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) virtio_device_ready(portdev->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (use_multiport(portdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) list_for_each_entry(port, &portdev->ports, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) port->in_vq = portdev->in_vqs[port->id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) port->out_vq = portdev->out_vqs[port->id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) fill_queue(port->in_vq, &port->inbuf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) /* Get port open/close status on the host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) * If a port was open at the time of suspending, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) * have to let the host know that it's still open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (port->guest_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) static struct virtio_driver virtio_console = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) .feature_table = features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) .feature_table_size = ARRAY_SIZE(features),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) .driver.name = KBUILD_MODNAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) .driver.owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) .id_table = id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) .probe = virtcons_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) .remove = virtcons_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) .config_changed = config_intr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) .freeze = virtcons_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) .restore = virtcons_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) static struct virtio_driver virtio_rproc_serial = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) .feature_table = rproc_serial_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) .feature_table_size = ARRAY_SIZE(rproc_serial_features),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) .driver.name = "virtio_rproc_serial",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) .driver.owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) .id_table = rproc_serial_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) .probe = virtcons_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) .remove = virtcons_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) static int __init init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (IS_ERR(pdrvdata.class)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) err = PTR_ERR(pdrvdata.class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) pr_err("Error %d creating virtio-ports class\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (!pdrvdata.debugfs_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) pr_warn("Error creating debugfs dir for virtio-ports\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) INIT_LIST_HEAD(&pdrvdata.consoles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) INIT_LIST_HEAD(&pdrvdata.portdevs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) err = register_virtio_driver(&virtio_console);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) pr_err("Error %d registering virtio driver\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) err = register_virtio_driver(&virtio_rproc_serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) pr_err("Error %d registering virtio rproc serial driver\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) goto unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) unregister_virtio_driver(&virtio_console);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) debugfs_remove_recursive(pdrvdata.debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) class_destroy(pdrvdata.class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) static void __exit fini(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) reclaim_dma_bufs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) unregister_virtio_driver(&virtio_console);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) unregister_virtio_driver(&virtio_rproc_serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) class_destroy(pdrvdata.class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) debugfs_remove_recursive(pdrvdata.debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) module_init(init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) module_exit(fini);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) MODULE_DESCRIPTION("Virtio console driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) MODULE_LICENSE("GPL");