/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_DEVICE_H
#define S390_DEVICE_H

#include <asm/ccwdev.h>
#include <linux/atomic.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include "io_sch.h"

/*
 * states of the device state machine
 */
enum dev_state {
	DEV_STATE_NOT_OPER,
	DEV_STATE_SENSE_ID,
	DEV_STATE_OFFLINE,
	DEV_STATE_VERIFY,
	DEV_STATE_ONLINE,
	DEV_STATE_W4SENSE,
	DEV_STATE_DISBAND_PGID,
	DEV_STATE_BOXED,
	/* states to wait for i/o completion before doing something */
	DEV_STATE_TIMEOUT_KILL,
	DEV_STATE_QUIESCE,
	/* special states for devices gone not operational */
	DEV_STATE_DISCONNECTED,
	DEV_STATE_DISCONNECTED_SENSE_ID,
	DEV_STATE_CMFCHANGE,
	DEV_STATE_CMFUPDATE,
	DEV_STATE_STEAL_LOCK,
	/* last element! */
	NR_DEV_STATES
};

/*
 * asynchronous events of the device state machine
 */
enum dev_event {
	DEV_EVENT_NOTOPER,
	DEV_EVENT_INTERRUPT,
	DEV_EVENT_TIMEOUT,
	DEV_EVENT_VERIFY,
	/* last element! */
	NR_DEV_EVENTS
};

struct ccw_device;

/*
 * action called through the state/event jumptable
 */
typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];

static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
	int state = cdev->private->state;

	if (dev_event == DEV_EVENT_INTERRUPT) {
		if (state == DEV_STATE_ONLINE)
			inc_irq_stat(cdev->private->int_class);
		else if (state != DEV_STATE_CMFCHANGE &&
			 state != DEV_STATE_CMFUPDATE)
			inc_irq_stat(IRQIO_CIO);
	}
	dev_jumptable[state][dev_event](cdev, dev_event);
}
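
/*
 * Illustrative sketch only (the call site and locking shown here are
 * assumptions, not requirements stated by this header): a caller that
 * has received an interrupt for @cdev and holds the subchannel lock
 * could feed it into the state machine with
 *
 *	dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
 *
 * The function invoked is selected from dev_jumptable by the device's
 * current state and the event.
 */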

/*
 * Returns 1 if the device state is final.
 */
static inline int
dev_fsm_final_state(struct ccw_device *cdev)
{
	return (cdev->private->state == DEV_STATE_NOT_OPER ||
		cdev->private->state == DEV_STATE_OFFLINE ||
		cdev->private->state == DEV_STATE_ONLINE ||
		cdev->private->state == DEV_STATE_BOXED);
}
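
/*
 * Illustrative sketch only: callers typically wait for the state
 * machine to settle in a final state before acting on the device.
 * Assuming the wait_q member of struct ccw_device_private (declared
 * in io_sch.h), such a wait could look like
 *
 *	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
 */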

int __init io_subchannel_init(void);

void io_subchannel_recog_done(struct ccw_device *cdev);
void io_subchannel_init_config(struct subchannel *sch);

int ccw_device_cancel_halt_clear(struct ccw_device *);

int ccw_device_is_orphan(struct ccw_device *);

void ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
int ccw_purge_blacklisted(void);
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);

/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);

/* Function prototypes for internal request handling. */
int lpm_adjust(int lpm, int mask);
void ccw_request_start(struct ccw_device *);
int ccw_request_cancel(struct ccw_device *cdev);
void ccw_request_handler(struct ccw_device *cdev);
void ccw_request_timeout(struct ccw_device *cdev);
void ccw_request_notoper(struct ccw_device *cdev);

/* Function prototypes for sense id stuff. */
void ccw_device_sense_id_start(struct ccw_device *);
void ccw_device_sense_id_done(struct ccw_device *, int);

/* Function prototypes for path grouping stuff. */
void ccw_device_verify_start(struct ccw_device *);
void ccw_device_verify_done(struct ccw_device *, int);

void ccw_device_disband_start(struct ccw_device *);
void ccw_device_disband_done(struct ccw_device *, int);

int ccw_device_stlck(struct ccw_device *);

/* Helper functions for machine check handling. */
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);

void ccw_device_timeout(struct timer_list *t);
void ccw_device_set_timeout(struct ccw_device *, int);
void ccw_device_schedule_recovery(void);

/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
void cmf_reactivate(void);
int ccw_set_cmf(struct ccw_device *cdev, int enable);
extern struct device_attribute dev_attr_cmb_enable;
#endif /* S390_DEVICE_H */