// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER	"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface (the SCLP's
 * receive mask). */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface (the SCLP's
 * send mask). */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

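/* Completion callback for the suspend request: signals that the request
 * queue has been flushed. */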
static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);
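
/*
 * Example (illustrative, not from this file): both parameters can be set
 * on the kernel command line, e.g.
 *
 *	sclp_con_pages=12 sclp_con_drop=0
 *
 * sclp_con_pages values below SCLP_CONSOLE_PAGES are ignored.
 */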

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

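/* Queue a read event data request unless one is already pending.
 * Called while sclp_lock is locked. */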
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

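/* Check whether a new request may be added to the queue. The suspend and
 * init requests are always allowed; everything else requires a running,
 * initialized and active interface. Called while sclp_lock is locked. */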
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}
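		/* Call the receiver callback with sclp_lock released */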
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
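/* Note: bit 31 of the s390 TOD clock is roughly one second (exactly 2^20
 * microseconds), so placing whole seconds at bit 31 via "<< 32" slightly
 * overestimates the interval - acceptable for a timeout value. */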
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
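	/* CR0 bit 54 is the service-signal external interruption subclass */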
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

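	/* The state-change callback runs without sclp_lock held and may
	 * modify the listener list, so restart the walk from the head
	 * after each invocation. */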
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	u8		masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8 sclp_receive_mask[mask_length];
	 * u8 sclp_send_mask[mask_length];
	 * u32 read_data_function_mask;
	 */
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
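
/*
 * Example (illustrative sketch, not part of this file): a consumer of
 * message events might register roughly like this:
 *
 *	static void my_receiver(struct evbuf_header *evbuf) { ... }
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EVTYP_MSG_MASK,
 *		.receiver_fn  = my_receiver,
 *	};
 *
 *	rc = sclp_register(&my_listener);
 */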

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
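		/* Flag bit 0x80 marks the event buffer as processed */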
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) sclp_mask_state = sclp_mask_state_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
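
/*
 * Note: sclp_init_mask() waits for completion via sclp_sync_wait(), which
 * polls for the service-signal interrupt instead of sleeping, so mask
 * updates also work in contexts that cannot schedule.
 */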
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* Deactivate SCLP interface. On success, new requests will be rejected and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * events will no longer be dispatched. Return 0 on success, non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * otherwise. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) sclp_deactivate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* Deactivate can only be called when active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (sclp_activation_state != sclp_activation_state_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) sclp_activation_state = sclp_activation_state_deactivating;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) rc = sclp_init_mask(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) sclp_activation_state = sclp_activation_state_inactive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) sclp_activation_state = sclp_activation_state_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) EXPORT_SYMBOL(sclp_deactivate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* Reactivate SCLP interface after sclp_deactivate. On success, new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * requests will be accepted and events will be dispatched again. Return 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * on success, non-zero otherwise. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) sclp_reactivate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* Reactivate can only be called when inactive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (sclp_activation_state != sclp_activation_state_inactive) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) sclp_activation_state = sclp_activation_state_activating;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) rc = sclp_init_mask(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) sclp_activation_state = sclp_activation_state_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) sclp_activation_state = sclp_activation_state_inactive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) EXPORT_SYMBOL(sclp_reactivate);
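
/*
 * Illustrative sketch (not part of the original driver): the two exported
 * calls above are intended to be used as a pair. A caller that must quiesce
 * the SCLP interface temporarily could bracket its critical section as
 * below; the function name is hypothetical.
 */
static int __maybe_unused example_sclp_quiesced_section(void)
{
	int rc;

	/* Reject new requests and stop event dispatching. */
	rc = sclp_deactivate();
	if (rc)
		return rc;
	/* ... work that must not race with SCLP events ... */

	/* Accept requests and dispatch events again. */
	return sclp_reactivate();
}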
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* Handler for external interruption used during initialization. Modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * request state to done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static void sclp_check_handler(struct ext_code ext_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) unsigned int param32, unsigned long param64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) u32 finished_sccb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) inc_irq_stat(IRQEXT_SCP);
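	/*
	 * The interrupt parameter carries the address of the finished SCCB;
	 * its low-order bits are flag bits and are masked off here.
	 */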
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) finished_sccb = param32 & 0xfffffff8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* Is this the interrupt we are waiting for? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (finished_sccb == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) finished_sccb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) spin_lock(&sclp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (sclp_running_state == sclp_running_state_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) sclp_init_req.status = SCLP_REQ_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) sclp_running_state = sclp_running_state_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) spin_unlock(&sclp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /* Initial init mask request timed out. Modify request state to failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) sclp_check_timeout(struct timer_list *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (sclp_running_state == sclp_running_state_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) sclp_init_req.status = SCLP_REQ_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) sclp_running_state = sclp_running_state_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
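
/*
 * Both sclp_check_handler() and sclp_check_timeout() test sclp_running_state
 * under sclp_lock before completing the init request, so whichever fires
 * first wins and the late one becomes a no-op.
 */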
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /* Perform a check of the SCLP interface. Return zero if the interface is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * available and there are no pending requests from a previous instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * Return non-zero otherwise. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) sclp_check_interface(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct init_sccb *sccb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) int retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* Prepare init mask command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) __sclp_make_init_req(0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) sccb = (struct init_sccb *) sclp_init_req.sccb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) rc = sclp_service_call(sclp_init_req.command, sccb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (rc == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) sclp_init_req.status = SCLP_REQ_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) sclp_running_state = sclp_running_state_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) sclp_check_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* Enable service-signal interruption - needs to happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * with IRQs enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* Wait for signal from interrupt or timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) sclp_sync_wait();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* Disable service-signal interruption - needs to happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * with IRQs enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) del_timer(&sclp_request_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (sclp_init_req.status == SCLP_REQ_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (sccb->header.response_code == 0x20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) } else if (sccb->header.response_code == 0x74f0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (!sclp_mask_compat_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) sclp_mask_compat_mode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
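
/*
 * Note: sclp_check_interface() drives its request synchronously with a
 * temporary interrupt handler because it runs before the regular handler
 * is registered by sclp_init().
 */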
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* Reboot event handler. Reset send and receive masks to prevent pending SCLP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * events from interfering with the rebooted system. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) sclp_deactivate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static struct notifier_block sclp_reboot_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) .notifier_call = sclp_reboot_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * Suspend/resume SCLP notifier implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct sclp_register *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!rollback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) list_for_each_entry(reg, &sclp_reg_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) reg->pm_event_posted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
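	/*
	 * Notify one listener at a time, dropping sclp_lock around each
	 * callback; pm_event_posted records who has been notified so the
	 * list walk can be restarted safely after every callback.
	 */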
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) list_for_each_entry(reg, &sclp_reg_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (rollback && reg->pm_event_posted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (!rollback && !reg->pm_event_posted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (reg->pm_event_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) reg->pm_event_fn(reg, sclp_pm_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) reg->pm_event_posted = rollback ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
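
/*
 * Illustrative sketch (not from the original source): an event driver that
 * wants power-management notifications supplies a pm_event_fn in its
 * struct sclp_register. All identifiers below are hypothetical.
 */
static void __maybe_unused example_pm_event_fn(struct sclp_register *reg,
					       enum sclp_pm_event event)
{
	switch (event) {
	case SCLP_PM_EVENT_FREEZE:
		/* Stop submitting new requests. */
		break;
	case SCLP_PM_EVENT_THAW:
	case SCLP_PM_EVENT_RESTORE:
		/* Resume normal operation. */
		break;
	}
}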
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * Suspend/resume callbacks for platform device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static int sclp_freeze(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) sclp_suspend_state = sclp_suspend_state_suspended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /* Initialize suspend request data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) sclp_suspend_req.callback = sclp_suspend_req_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) sclp_suspend_req.status = SCLP_REQ_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) init_completion(&sclp_request_queue_flushed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) rc = sclp_add_request(&sclp_suspend_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) wait_for_completion(&sclp_request_queue_flushed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) else if (rc != -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) goto fail_thaw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) rc = sclp_deactivate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) goto fail_thaw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) fail_thaw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) sclp_suspend_state = sclp_suspend_state_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static int sclp_undo_suspend(enum sclp_pm_event event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) rc = sclp_reactivate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) sclp_suspend_state = sclp_suspend_state_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) sclp_pm_event(event, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static int sclp_thaw(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static int sclp_restore(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static const struct dev_pm_ops sclp_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) .freeze = sclp_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .thaw = sclp_thaw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) .restore = sclp_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static ssize_t con_pages_show(struct device_driver *dev, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return sprintf(buf, "%i\n", sclp_console_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static DRIVER_ATTR_RO(con_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static ssize_t con_drop_show(struct device_driver *dev, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return sprintf(buf, "%i\n", sclp_console_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static DRIVER_ATTR_RO(con_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static ssize_t con_full_show(struct device_driver *dev, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return sprintf(buf, "%lu\n", sclp_console_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static DRIVER_ATTR_RO(con_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static struct attribute *sclp_drv_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) &driver_attr_con_pages.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) &driver_attr_con_drop.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) &driver_attr_con_full.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static struct attribute_group sclp_drv_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) .attrs = sclp_drv_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static const struct attribute_group *sclp_drv_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) &sclp_drv_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) };
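
/*
 * With the driver registered, the attributes above are reachable through
 * sysfs under the platform bus, e.g. (path shown for illustration only):
 * /sys/bus/platform/drivers/sclp/con_pages
 */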
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static struct platform_driver sclp_pdrv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .name = "sclp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) .pm = &sclp_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) .groups = sclp_drv_attr_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static struct platform_device *sclp_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* Initialize SCLP driver. Return zero if driver is operational, non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * otherwise. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) sclp_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /* Check for previous or running initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (sclp_init_state != sclp_init_state_uninitialized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) sclp_init_state = sclp_init_state_initializing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /* Set up variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) INIT_LIST_HEAD(&sclp_req_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) INIT_LIST_HEAD(&sclp_reg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) list_add(&sclp_state_change_event.list, &sclp_reg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) timer_setup(&sclp_request_timer, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /* Check interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) rc = sclp_check_interface();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) spin_lock_irqsave(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) goto fail_init_state_uninitialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) /* Register reboot handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) rc = register_reboot_notifier(&sclp_reboot_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) goto fail_init_state_uninitialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /* Register interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) goto fail_unregister_reboot_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) sclp_init_state = sclp_init_state_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /* Enable service-signal external interruption - needs to happen with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * IRQs enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) sclp_init_mask(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) fail_unregister_reboot_notifier:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) unregister_reboot_notifier(&sclp_reboot_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) fail_init_state_uninitialized:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) sclp_init_state = sclp_init_state_uninitialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) free_page((unsigned long) sclp_read_sccb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) free_page((unsigned long) sclp_init_sccb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) fail_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) spin_unlock_irqrestore(&sclp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * to print the panic message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static int sclp_panic_notify(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) unsigned long event, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (sclp_suspend_state == sclp_suspend_state_suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) sclp_undo_suspend(SCLP_PM_EVENT_THAW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) static struct notifier_block sclp_on_panic_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) .notifier_call = sclp_panic_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) .priority = SCLP_PANIC_PRIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static __init int sclp_initcall(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) rc = platform_driver_register(&sclp_pdrv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) rc = PTR_ERR_OR_ZERO(sclp_pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) goto fail_platform_driver_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) rc = atomic_notifier_chain_register(&panic_notifier_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) &sclp_on_panic_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) goto fail_platform_device_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return sclp_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) fail_platform_device_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) platform_device_unregister(sclp_pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) fail_platform_driver_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) platform_driver_unregister(&sclp_pdrv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) arch_initcall(sclp_initcall);