// SPDX-License-Identifier: GPL-2.0+
/*
 * Linux on zSeries Channel Measurement Facility support
 *
 * Copyright IBM Corp. 2000, 2006
 *
 * Authors: Arnd Bergmann <arndb@de.ibm.com>
 *	    Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/memblock.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/timex.h>	/* get_tod_clock() */

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/cmb.h>
#include <asm/div64.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"

/*
 * parameter to enable cmf during boot, possible uses are:
 *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
 *               used on any subchannel
 *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
 *                     <num> subchannels, where <num> is an integer
 *                     between 1 and 65535, default is 1024
 */
#define ARGSTRING "s390cmf"
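
/*
 * For example, booting with "s390cmf" reserves room for the default of
 * 1024 measurement blocks, while "s390cmf=4096" reserves room to measure
 * up to 4096 subchannels (the current maximum, see the sizing comment
 * further below).
 */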

/* indices for READCMB */
enum cmb_index {
	avg_utilization = -1,
	/* basic and extended format: */
	cmb_ssch_rsch_count = 0,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
	/* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};

/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC:      traditional channel measurement blocks supported
 *		    by all machines that we run on
 * @CMF_EXTENDED:   improved format that was introduced with the z990
 *		    machine
 * @CMF_AUTODETECT: default: use extended format when running on a machine
 *		    supporting extended format, otherwise fall back to
 *		    basic format
 */
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,
};

/*
 * format - actual format for all measurement blocks
 *
 * The format module parameter can be set to a value of 0 (zero)
 * or 1, indicating basic or extended format as described for
 * enum cmb_format.
 */
static int format = CMF_AUTODETECT;
module_param(format, bint, 0444);

/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * Most of these functions operate on a struct ccw_device. There is only
 * one instance of struct cmb_operations because the format of the measurement
 * data is guaranteed to be the same for every ccw_device.
 *
 * @alloc:	allocate memory for a channel measurement block,
 *		either with the help of a special pool or with kmalloc
 * @free:	free memory allocated with @alloc
 * @set:	enable or disable measurement
 * @read:	read a measurement entry at an index
 * @readall:	read a measurement block in a common format
 * @reset:	clear the data in the associated measurement block and
 *		reset its time stamp
 */
struct cmb_operations {
	int  (*alloc)  (struct ccw_device *);
	void (*free)   (struct ccw_device *);
	int  (*set)    (struct ccw_device *, u32);
	u64  (*read)   (struct ccw_device *, int);
	int  (*readall)(struct ccw_device *, struct cmbdata *);
	void (*reset)  (struct ccw_device *);
/* private: */
	struct attribute_group *attr_group;
};
static struct cmb_operations *cmbops;

struct cmb_data {
	void *hw_block;   /* Pointer to block updated by hardware */
	void *last_block; /* Last changed block copied from hardware block */
	int size;	  /* Size of hw_block and last_block */
	unsigned long long last_update;  /* when last_block was updated */
};

/*
 * Our user interface is designed in terms of nanoseconds,
 * while the hardware measures total times in its own
 * unit.
 */
static inline u64 time_to_nsec(u32 value)
{
	return ((u64)value) * 128000ull;
}
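
/*
 * A quick sanity check of the conversion above: the hardware accumulates
 * times in units of 128 microseconds, so a raw counter value of 5
 * corresponds to 5 * 128 us = 640 us = 640000 ns, i.e. 5 * 128000.
 */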

/*
 * Users are usually interested in average times,
 * not accumulated time.
 * This also helps us with atomicity problems
 * when reading single values.
 */
static inline u64 time_to_avg_nsec(u32 value, u32 count)
{
	u64 ret;

	/* no samples yet, avoid division by 0 */
	if (count == 0)
		return 0;

	/* value comes in units of 128 µsec */
	ret = time_to_nsec(value);
	do_div(ret, count);

	return ret;
}

#define CMF_OFF 0
#define CMF_ON	2

/*
 * Activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled.
 */
static inline void cmf_activate(void *area, unsigned int onoff)
{
	register void * __gpr2 asm("2");
	register long __gpr1 asm("1");

	__gpr2 = area;
	__gpr1 = onoff;
	/* activate channel measurement */
	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}

static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
		     unsigned long address)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret;

	sch->config.mme = mme;
	sch->config.mbfc = mbfc;
	/* address can be either a block address or a block index */
	if (mbfc)
		sch->config.mba = address;
	else
		sch->config.mbi = address;

	ret = cio_commit_config(sch);
	if (!mme && ret == -ENODEV) {
		/*
		 * The task was to disable measurement block updates but
		 * the subchannel is already gone. Report success.
		 */
		ret = 0;
	}
	return ret;
}

struct set_schib_struct {
	u32 mme;
	int mbfc;
	unsigned long address;
	wait_queue_head_t wait;
	int ret;
};

#define CMF_PENDING 1
#define SET_SCHIB_TIMEOUT (10 * HZ)

static int set_schib_wait(struct ccw_device *cdev, u32 mme,
			  int mbfc, unsigned long address)
{
	struct set_schib_struct set_data;
	int ret = -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb)
		goto out;

	ret = set_schib(cdev, mme, mbfc, address);
	if (ret != -EBUSY)
		goto out;

	/* if the device is not online, don't even try again */
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out;

	init_waitqueue_head(&set_data.wait);
	set_data.mme = mme;
	set_data.mbfc = mbfc;
	set_data.address = address;
	set_data.ret = CMF_PENDING;

	cdev->private->state = DEV_STATE_CMFCHANGE;
	cdev->private->cmb_wait = &set_data;
	spin_unlock_irq(cdev->ccwlock);

	ret = wait_event_interruptible_timeout(set_data.wait,
					       set_data.ret != CMF_PENDING,
					       SET_SCHIB_TIMEOUT);
	spin_lock_irq(cdev->ccwlock);
	if (ret <= 0) {
		if (set_data.ret == CMF_PENDING) {
			set_data.ret = (ret == 0) ? -ETIME : ret;
			if (cdev->private->state == DEV_STATE_CMFCHANGE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
	}
	cdev->private->cmb_wait = NULL;
	ret = set_data.ret;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}

void retry_set_schib(struct ccw_device *cdev)
{
	struct set_schib_struct *set_data = cdev->private->cmb_wait;

	if (!set_data)
		return;

	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
				  set_data->address);
	wake_up(&set_data->wait);
}

static int cmf_copy_block(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct cmb_data *cmb_data;
	void *hw_block;

	if (cio_update_schib(sch))
		return -ENODEV;

	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
		/* Don't copy if a start function is in progress. */
		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
		    (scsw_actl(&sch->schib.scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
			return -EBUSY;
	}
	cmb_data = cdev->private->cmb;
	hw_block = cmb_data->hw_block;
	memcpy(cmb_data->last_block, hw_block, cmb_data->size);
	cmb_data->last_update = get_tod_clock();
	return 0;
}

struct copy_block_struct {
	wait_queue_head_t wait;
	int ret;
};

static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
	struct copy_block_struct copy_block;
	int ret = -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb)
		goto out;

	ret = cmf_copy_block(cdev);
	if (ret != -EBUSY)
		goto out;

	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out;

	init_waitqueue_head(&copy_block.wait);
	copy_block.ret = CMF_PENDING;

	cdev->private->state = DEV_STATE_CMFUPDATE;
	cdev->private->cmb_wait = &copy_block;
	spin_unlock_irq(cdev->ccwlock);

	ret = wait_event_interruptible(copy_block.wait,
				       copy_block.ret != CMF_PENDING);
	spin_lock_irq(cdev->ccwlock);
	if (ret) {
		if (copy_block.ret == CMF_PENDING) {
			copy_block.ret = -ERESTARTSYS;
			if (cdev->private->state == DEV_STATE_CMFUPDATE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
	}
	cdev->private->cmb_wait = NULL;
	ret = copy_block.ret;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}

void cmf_retry_copy_block(struct ccw_device *cdev)
{
	struct copy_block_struct *copy_block = cdev->private->cmb_wait;

	if (!copy_block)
		return;

	copy_block->ret = cmf_copy_block(cdev);
	wake_up(&copy_block->wait);
}

static void cmf_generic_reset(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	if (cmb_data) {
		memset(cmb_data->last_block, 0, cmb_data->size);
		/*
		 * Need to reset hw block as well to make the hardware start
		 * from 0 again.
		 */
		memset(cmb_data->hw_block, 0, cmb_data->size);
		cmb_data->last_update = 0;
	}
	cdev->private->cmb_start_time = get_tod_clock();
	spin_unlock_irq(cdev->ccwlock);
}

/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:	pointer to CMBs (only in basic measurement mode)
 * @list:	contains a linked list of all subchannels
 * @num_channels: number of channels to be measured
 * @lock:	protect concurrent access to @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

static struct cmb_area cmb_area = {
	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels = 1024,
};

/* ****** old style CMB handling ********/

/*
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
 * Current kernels don't allow kmalloc with more than 128kb, so the
 * maximum is 4096.
 */
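
/*
 * The numbers above follow from sizeof(struct cmb) being 32 bytes:
 * 1024 blocks use 1024 * 32 bytes = 32 kb, and 128 kb / 32 bytes
 * gives the upper limit of 4096 blocks.
 */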

module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);

/**
 * struct cmb - basic channel measurement block
 * @ssch_rsch_count: number of ssch and rsch
 * @sample_count: number of samples
 * @device_connect_time: time of device connect
 * @function_pending_time: time of function pending
 * @device_disconnect_time: time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time: time of device active only
 * @reserved: unused in basic measurement mode
 *
 * The measurement block as used by the hardware. The fields are described
 * further in z/Architecture Principles of Operation, chapter 17.
 *
 * The cmb area made up from these blocks must be a contiguous array and may
 * not be reallocated or freed.
 * Only one cmb area can be present in the system.
 */
struct cmb {
	u16 ssch_rsch_count;
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];
};

/*
 * Insert a single device into the cmb_area list.
 * Called with cmb_area.lock held from alloc_cmb.
 */
static int alloc_cmb_single(struct ccw_device *cdev,
			    struct cmb_data *cmb_data)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Find first unused cmb in cmb_area.mem.
	 * This is a little tricky: cmb_area.list
	 * remains sorted by ->cmb->hw_data pointers.
	 */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		struct cmb_data *data;
		data = node->cmb;
		if ((struct cmb*)data->hw_block > cmb)
			break;
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		ret = -ENOMEM;
		goto out;
	}

	/* insert new cmb */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cmb_data->hw_block = cmb;
	cdev->private->cmb = cmb_data;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}

static int alloc_cmb(struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;
	struct cmb_data *cmb_data;

	/* Allocate private cmb_data. */
	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
	if (!cmb_data)
		return -ENOMEM;

	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
	if (!cmb_data->last_block) {
		kfree(cmb_data);
		return -ENOMEM;
	}
	cmb_data->size = sizeof(struct cmb);
	spin_lock(&cmb_area.lock);

	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		spin_unlock(&cmb_area.lock);
		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
					      get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			/* no luck */
			ret = -ENOMEM;
			goto out;
		} else {
			/* everything ok */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, CMF_ON);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev, cmb_data);
out:
	spin_unlock(&cmb_area.lock);
	if (ret) {
		kfree(cmb_data->last_block);
		kfree(cmb_data);
	}
	return ret;
}

static void free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;
	struct cmb_data *cmb_data;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	priv = cdev->private;
	cmb_data = priv->cmb;
	priv->cmb = NULL;
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	list_del_init(&priv->cmb_list);

	if (list_empty(&cmb_area.list)) {
		ssize_t size;
		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, CMF_OFF);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}

static int set_cmb(struct ccw_device *cdev, u32 mme)
{
	u16 offset;
	struct cmb_data *cmb_data;
	unsigned long flags;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -EINVAL;
	}
	cmb_data = cdev->private->cmb;
	offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	return set_schib_wait(cdev, mme, 0, offset);
}

/* calculate utilization in 0.1 percent units */
static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
			     u64 device_disconnect_time, u64 start_time)
{
	u64 utilization, elapsed_time;

	utilization = time_to_nsec(device_connect_time +
				   function_pending_time +
				   device_disconnect_time);

	elapsed_time = get_tod_clock() - start_time;
	elapsed_time = tod_to_ns(elapsed_time);
	elapsed_time /= 1000;

	return elapsed_time ? (utilization / elapsed_time) : 0;
}
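
/*
 * A worked example of the 0.1 percent units above: the accumulated busy
 * time is converted to nanoseconds while the elapsed time is reduced to
 * microseconds, so the quotient is 1000 * busy / elapsed.  A device that
 * was busy for half of the measurement interval therefore reads as 500,
 * i.e. 50.0 percent.
 */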

static u64 read_cmb(struct ccw_device *cdev, int index)
{
	struct cmb_data *cmb_data;
	unsigned long flags;
	struct cmb *cmb;
	u64 ret = 0;
	u32 val;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data)
		goto out;

	cmb = cmb_data->hw_block;
	switch (index) {
	case avg_utilization:
		ret = __cmb_utilization(cmb->device_connect_time,
					cmb->function_pending_time,
					cmb->device_disconnect_time,
					cdev->private->cmb_start_time);
		goto out;
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	default:
		goto out;
	}
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}

static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmb *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		ret = -EAGAIN;
		goto out;
	}
	cmb = cmb_data->last_block;
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset(data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	data->elapsed_time = tod_to_ns(time);

	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}

static void reset_cmb(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}

static int cmf_enabled(struct ccw_device *cdev)
{
	int enabled;

	spin_lock_irq(cdev->ccwlock);
	enabled = !!cdev->private->cmb;
	spin_unlock_irq(cdev->ccwlock);

	return enabled;
}

static struct attribute_group cmf_attr_group;

static struct cmb_operations cmbops_basic = {
	.alloc	= alloc_cmb,
	.free	= free_cmb,
	.set	= set_cmb,
	.read	= read_cmb,
	.readall    = readall_cmb,
	.reset	    = reset_cmb,
	.attr_group = &cmf_attr_group,
};

/* ******** extended cmb handling ********/

/**
 * struct cmbe - extended channel measurement block
 * @ssch_rsch_count: number of ssch and rsch
 * @sample_count: number of samples
 * @device_connect_time: time of device connect
 * @function_pending_time: time of function pending
 * @device_disconnect_time: time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time: time of device active only
 * @device_busy_time: time of device busy
 * @initial_command_response_time: initial command response time
 * @reserved: unused
 *
 * The measurement block as used by the hardware. May be in any 64 bit physical
 * location.
 * The fields are described further in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 */
struct cmbe {
	u32 ssch_rsch_count;
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];
} __packed __aligned(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) static struct kmem_cache *cmbe_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) static int alloc_cmbe(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct cmb_data *cmb_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) struct cmbe *cmbe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (!cmbe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (!cmb_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (!cmb_data->last_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) cmb_data->size = sizeof(*cmbe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) cmb_data->hw_block = cmbe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) spin_lock(&cmb_area.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) spin_lock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (cdev->private->cmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) cdev->private->cmb = cmb_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* activate global measurement if this is the first channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (list_empty(&cmb_area.list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) cmf_activate(NULL, CMF_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) spin_unlock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) spin_unlock(&cmb_area.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) spin_unlock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) spin_unlock(&cmb_area.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (cmb_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) kfree(cmb_data->last_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) kfree(cmb_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) kmem_cache_free(cmbe_cache, cmbe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) static void free_cmbe(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct cmb_data *cmb_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) spin_lock(&cmb_area.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) spin_lock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) cmb_data = cdev->private->cmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) cdev->private->cmb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (cmb_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) kfree(cmb_data->last_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) kmem_cache_free(cmbe_cache, cmb_data->hw_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) kfree(cmb_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* deactivate global measurement if this is the last channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) list_del_init(&cdev->private->cmb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (list_empty(&cmb_area.list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) cmf_activate(NULL, CMF_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) spin_unlock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) spin_unlock(&cmb_area.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
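/*
 * set_cmbe() attaches or detaches the extended measurement block for the
 * device: with a nonzero mme the measurement-block address (mba) points at
 * the per-device hw_block, with mme == 0 measurements are switched off.
 * The address is sampled under the ccwlock, but the lock is dropped before
 * calling set_schib_wait(), which may need to sleep while the subchannel
 * is reconfigured.
 */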
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static int set_cmbe(struct ccw_device *cdev, u32 mme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned long mba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct cmb_data *cmb_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) spin_lock_irqsave(cdev->ccwlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (!cdev->private->cmb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) spin_unlock_irqrestore(cdev->ccwlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) cmb_data = cdev->private->cmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) mba = mme ? (unsigned long) cmb_data->hw_block : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) spin_unlock_irqrestore(cdev->ccwlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) return set_schib_wait(cdev, mme, 1, mba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
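/*
 * read_cmbe() returns raw counters for cmb_ssch_rsch_count and
 * cmb_sample_count, the utilization in tenths of a percent for
 * avg_utilization, and the average time per sample in nanoseconds for
 * the remaining indices.
 */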
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) static u64 read_cmbe(struct ccw_device *cdev, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct cmb_data *cmb_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct cmbe *cmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) u64 ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) spin_lock_irqsave(cdev->ccwlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) cmb_data = cdev->private->cmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!cmb_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) cmb = cmb_data->hw_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) switch (index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) case avg_utilization:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) ret = __cmb_utilization(cmb->device_connect_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) cmb->function_pending_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) cmb->device_disconnect_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) cdev->private->cmb_start_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) case cmb_ssch_rsch_count:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ret = cmb->ssch_rsch_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) case cmb_sample_count:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ret = cmb->sample_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) case cmb_device_connect_time:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) val = cmb->device_connect_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) case cmb_function_pending_time:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) val = cmb->function_pending_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) case cmb_device_disconnect_time:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) val = cmb->device_disconnect_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) case cmb_control_unit_queuing_time:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) val = cmb->control_unit_queuing_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) case cmb_device_active_only_time:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) val = cmb->device_active_only_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) case cmb_device_busy_time:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) val = cmb->device_busy_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) case cmb_initial_command_response_time:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) val = cmb->initial_command_response_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) ret = time_to_avg_nsec(val, cmb->sample_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) spin_unlock_irqrestore(cdev->ccwlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct cmbe *cmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct cmb_data *cmb_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) u64 time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ret = cmf_cmb_copy_wait(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) spin_lock_irqsave(cdev->ccwlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) cmb_data = cdev->private->cmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (!cmb_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (cmb_data->last_update == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) time = cmb_data->last_update - cdev->private->cmb_start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) memset(data, 0, sizeof(struct cmbdata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* we only know values before device_busy_time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) data->size = offsetof(struct cmbdata, device_busy_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) data->elapsed_time = tod_to_ns(time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) cmb = cmb_data->last_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /* copy data to new structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) data->ssch_rsch_count = cmb->ssch_rsch_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) data->sample_count = cmb->sample_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* time fields are converted to nanoseconds while copying */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) data->device_connect_time = time_to_nsec(cmb->device_connect_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) data->function_pending_time = time_to_nsec(cmb->function_pending_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) data->device_disconnect_time =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) time_to_nsec(cmb->device_disconnect_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) data->control_unit_queuing_time =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) time_to_nsec(cmb->control_unit_queuing_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) data->device_active_only_time =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) time_to_nsec(cmb->device_active_only_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) data->device_busy_time = time_to_nsec(cmb->device_busy_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) data->initial_command_response_time =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) time_to_nsec(cmb->initial_command_response_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) spin_unlock_irqrestore(cdev->ccwlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) static void reset_cmbe(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) cmf_generic_reset(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static struct attribute_group cmf_attr_group_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static struct cmb_operations cmbops_extended = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) .alloc = alloc_cmbe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) .free = free_cmbe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) .set = set_cmbe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) .read = read_cmbe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) .readall = readall_cmbe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .reset = reset_cmbe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) .attr_group = &cmf_attr_group_ext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return sprintf(buf, "%lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) (unsigned long long) cmf_read(to_ccwdev(dev), idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
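/*
 * The average sample interval is the time elapsed since measurements were
 * started divided by the number of hardware samples taken, in nanoseconds;
 * -1 is reported while no samples have been collected yet.
 */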
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static ssize_t cmb_show_avg_sample_interval(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) unsigned long count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) long interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) count = cmf_read(cdev, cmb_sample_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) spin_lock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) interval = get_tod_clock() - cdev->private->cmb_start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) interval = tod_to_ns(interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) interval /= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) interval = -1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) spin_unlock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return sprintf(buf, "%ld\n", interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) static ssize_t cmb_show_avg_utilization(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
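/*
 * Example: avg_utilization is reported in tenths of a percent, so a value
 * of 375 read via cmf_read() is shown as "37.5%".
 */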
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) #define cmf_attr(name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static ssize_t show_##name(struct device *dev, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct device_attribute *attr, char *buf) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) { return cmb_show_attr((dev), buf, cmb_##name); } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static DEVICE_ATTR(name, 0444, show_##name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) #define cmf_attr_avg(name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static ssize_t show_avg_##name(struct device *dev, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct device_attribute *attr, char *buf) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) { return cmb_show_attr((dev), buf, cmb_##name); } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) cmf_attr(ssch_rsch_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) cmf_attr(sample_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) cmf_attr_avg(device_connect_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) cmf_attr_avg(function_pending_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) cmf_attr_avg(device_disconnect_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) cmf_attr_avg(control_unit_queuing_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) cmf_attr_avg(device_active_only_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) cmf_attr_avg(device_busy_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) cmf_attr_avg(initial_command_response_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static struct attribute *cmf_attributes[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) &dev_attr_avg_sample_interval.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) &dev_attr_avg_utilization.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) &dev_attr_ssch_rsch_count.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) &dev_attr_sample_count.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) &dev_attr_avg_device_connect_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) &dev_attr_avg_function_pending_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) &dev_attr_avg_device_disconnect_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) &dev_attr_avg_control_unit_queuing_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) &dev_attr_avg_device_active_only_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static struct attribute_group cmf_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) .name = "cmf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) .attrs = cmf_attributes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static struct attribute *cmf_attributes_ext[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) &dev_attr_avg_sample_interval.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) &dev_attr_avg_utilization.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) &dev_attr_ssch_rsch_count.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) &dev_attr_sample_count.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) &dev_attr_avg_device_connect_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) &dev_attr_avg_function_pending_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) &dev_attr_avg_device_disconnect_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) &dev_attr_avg_control_unit_queuing_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) &dev_attr_avg_device_active_only_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) &dev_attr_avg_device_busy_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) &dev_attr_avg_initial_command_response_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static struct attribute_group cmf_attr_group_ext = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) .name = "cmf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) .attrs = cmf_attributes_ext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static ssize_t cmb_enable_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return sprintf(buf, "%d\n", cmf_enabled(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static ssize_t cmb_enable_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct device_attribute *attr, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) size_t c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) ret = kstrtoul(buf, 16, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ret = disable_cmf(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ret = enable_cmf(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return ret ? ret : c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) DEVICE_ATTR_RW(cmb_enable);
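/*
 * The attribute shows up as cmb_enable in the ccw device's sysfs directory,
 * next to the "cmf" attribute group created by enable_cmf().  Illustrative
 * use from user space (the bus ID 0.0.1234 is made up):
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/cmb_enable
 *	cat /sys/bus/ccw/devices/0.0.1234/cmf/avg_utilization
 */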
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) int ccw_set_cmf(struct ccw_device *cdev, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return cmbops->set(cdev, enable ? 2 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * enable_cmf() - switch on the channel measurement for a specific device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * @cdev: The ccw device to be enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * Enable channel measurements for @cdev. If this is called on a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * for which channel measurement is already enabled, a reset of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * measurement data is triggered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * Returns: %0 for success or a negative error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * Context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * non-atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) int enable_cmf(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) device_lock(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (cmf_enabled(cdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) cmbops->reset(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) get_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) ret = cmbops->alloc(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) cmbops->reset(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) cmbops->free(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ret = cmbops->set(cdev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) cmbops->free(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) put_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) device_unlock(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
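/*
 * Illustrative in-kernel use (not part of this file; my_cdev is a
 * placeholder for a driver's ccw device):
 *
 *	if (!enable_cmf(my_cdev)) {
 *		u64 util = cmf_read(my_cdev, avg_utilization);
 *		...
 *		disable_cmf(my_cdev);
 *	}
 */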
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * __disable_cmf() - switch off the channel measurement for a specific device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * @cdev: The ccw device to be disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * Returns: %0 for success or a negative error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * Context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * non-atomic, device_lock() held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int __disable_cmf(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ret = cmbops->set(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) cmbops->free(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) put_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * disable_cmf() - switch off the channel measurement for a specific device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * @cdev: The ccw device to be disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * Returns: %0 for success or a negative error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * Context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * non-atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) int disable_cmf(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) device_lock(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ret = __disable_cmf(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) device_unlock(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * cmf_read() - read one value from the current channel measurement block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * @cdev: the channel to be read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * @index: the index of the value to be read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * Returns: The value read or %0 if the value cannot be read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * Context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) u64 cmf_read(struct ccw_device *cdev, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return cmbops->read(cdev, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * cmf_readall() - read the current channel measurement block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * @cdev: the channel to be read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * @data: a pointer to a data block that will be filled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * Returns: %0 on success, a negative error value otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * Context:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return cmbops->readall(cdev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
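/*
 * Illustrative caller (my_cdev is a placeholder): cmf_readall() fills a
 * struct cmbdata with nanosecond values, so the connect time can be
 * related to the elapsed time directly:
 *
 *	struct cmbdata d;
 *
 *	if (!cmf_readall(my_cdev, &d))
 *		pr_debug("connected %llu of %llu ns\n",
 *			 d.device_connect_time, d.elapsed_time);
 */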
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Reenable cmf when a disconnected device becomes available again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) int cmf_reenable(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) cmbops->reset(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return cmbops->set(cdev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * cmf_reactivate() - reactivate measurement block updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * Use this during resume from hibernate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) void cmf_reactivate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) spin_lock(&cmb_area.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (!list_empty(&cmb_area.list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) cmf_activate(cmb_area.mem, CMF_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) spin_unlock(&cmb_area.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static int __init init_cmbe(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) __alignof__(struct cmbe), 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return cmbe_cache ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static int __init init_cmf(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) char *format_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) char *detect_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * If the user did not give a parameter, see if we are running on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * machine supporting extended measurement blocks, otherwise fall back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * to basic mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (format == CMF_AUTODETECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (!css_general_characteristics.ext_mb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) format = CMF_BASIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) format = CMF_EXTENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) detect_string = "autodetected";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) detect_string = "parameter";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) switch (format) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) case CMF_BASIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) format_string = "basic";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) cmbops = &cmbops_basic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) case CMF_EXTENDED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) format_string = "extended";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) cmbops = &cmbops_extended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ret = init_cmbe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) pr_info("Channel measurement facility initialized using format %s (mode %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) format_string, detect_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) device_initcall(init_cmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) EXPORT_SYMBOL_GPL(enable_cmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) EXPORT_SYMBOL_GPL(disable_cmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) EXPORT_SYMBOL_GPL(cmf_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) EXPORT_SYMBOL_GPL(cmf_readall);