Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards. The file below is drivers/hv/hv_util.c, the Hyper-V guest utilities driver (hv_utils), which implements the shutdown, time synchronization, heartbeat, KVP, VSS and file-copy integration services.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/hyperv.h>
#include <linux/clockchips.h>
#include <linux/ptp_clock_kernel.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

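/*
 * Integration Component (IC) protocol versions for the Shutdown (SD),
 * TimeSync (TS) and Heartbeat (HB) services, encoded as (major << 16) | minor.
 * For example, SD_VERSION_3_2 == (3 << 16) | 2 == 0x00030002, logged as "3.2".
 */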
#define SD_MAJOR	3
#define SD_MINOR	0
#define SD_MINOR_1	1
#define SD_MINOR_2	2
#define SD_VERSION_3_1	(SD_MAJOR << 16 | SD_MINOR_1)
#define SD_VERSION_3_2	(SD_MAJOR << 16 | SD_MINOR_2)
#define SD_VERSION	(SD_MAJOR << 16 | SD_MINOR)

#define SD_MAJOR_1	1
#define SD_VERSION_1	(SD_MAJOR_1 << 16 | SD_MINOR)

#define TS_MAJOR	4
#define TS_MINOR	0
#define TS_VERSION	(TS_MAJOR << 16 | TS_MINOR)

#define TS_MAJOR_1	1
#define TS_VERSION_1	(TS_MAJOR_1 << 16 | TS_MINOR)

#define TS_MAJOR_3	3
#define TS_VERSION_3	(TS_MAJOR_3 << 16 | TS_MINOR)

#define HB_MAJOR	3
#define HB_MINOR	0
#define HB_VERSION	(HB_MAJOR << 16 | HB_MINOR)

#define HB_MAJOR_1	1
#define HB_VERSION_1	(HB_MAJOR_1 << 16 | HB_MINOR)

static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;

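/*
 * Version tables offered to the host during ICMSGTYPE_NEGOTIATE, newest
 * first. vmbus_prep_negotiate_resp() picks a mutually supported framework
 * and service version and stores the negotiated service version in the
 * sd/ts/hb_srv_version variables above.
 */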
#define SD_VER_COUNT 4
static const int sd_versions[] = {
	SD_VERSION_3_2,
	SD_VERSION_3_1,
	SD_VERSION,
	SD_VERSION_1
};

#define TS_VER_COUNT 3
static const int ts_versions[] = {
	TS_VERSION,
	TS_VERSION_3,
	TS_VERSION_1
};

#define HB_VER_COUNT 2
static const int hb_versions[] = {
	HB_VERSION,
	HB_VERSION_1
};

#define FW_VER_COUNT 2
static const int fw_versions[] = {
	UTIL_FW_VERSION,
	UTIL_WS2K8_FW_VERSION
};

/*
 * Send the "hibernate" udev event in a thread context.
 */
struct hibernate_work_context {
	struct work_struct work;
	struct hv_device *dev;
};

static struct hibernate_work_context hibernate_context;
static bool hibernation_supported;

static void send_hibernate_uevent(struct work_struct *work)
{
	char *uevent_env[2] = { "EVENT=hibernate", NULL };
	struct hibernate_work_context *ctx;

	ctx = container_of(work, struct hibernate_work_context, work);

	kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);

	pr_info("Sent hibernation uevent\n");
}

static int hv_shutdown_init(struct hv_util_service *srv)
{
	struct vmbus_channel *channel = srv->channel;

	INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
	hibernate_context.dev = channel->device_obj;

	hibernation_supported = hv_is_hibernation_supported();

	return 0;
}

static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
	.util_cb = shutdown_onchannelcallback,
	.util_init = hv_shutdown_init,
};

static int hv_timesync_init(struct hv_util_service *srv);
static int hv_timesync_pre_suspend(void);
static void hv_timesync_deinit(void);

static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = {
	.util_cb = timesync_onchannelcallback,
	.util_init = hv_timesync_init,
	.util_pre_suspend = hv_timesync_pre_suspend,
	.util_deinit = hv_timesync_deinit,
};

static void heartbeat_onchannelcallback(void *context);
static struct hv_util_service util_heartbeat = {
	.util_cb = heartbeat_onchannelcallback,
};

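/*
 * The KVP, VSS and file-copy services below are implemented in hv_kvp.c,
 * hv_snapshot.c and hv_fcopy.c; they relay host requests to the user-space
 * helper daemons shipped in tools/hv of the kernel tree.
 */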
static struct hv_util_service util_kvp = {
	.util_cb = hv_kvp_onchannelcallback,
	.util_init = hv_kvp_init,
	.util_pre_suspend = hv_kvp_pre_suspend,
	.util_pre_resume = hv_kvp_pre_resume,
	.util_deinit = hv_kvp_deinit,
};

static struct hv_util_service util_vss = {
	.util_cb = hv_vss_onchannelcallback,
	.util_init = hv_vss_init,
	.util_pre_suspend = hv_vss_pre_suspend,
	.util_pre_resume = hv_vss_pre_resume,
	.util_deinit = hv_vss_deinit,
};

static struct hv_util_service util_fcopy = {
	.util_cb = hv_fcopy_onchannelcallback,
	.util_init = hv_fcopy_init,
	.util_pre_suspend = hv_fcopy_pre_suspend,
	.util_pre_resume = hv_fcopy_pre_resume,
	.util_deinit = hv_fcopy_deinit,
};

static void perform_shutdown(struct work_struct *dummy)
{
	orderly_poweroff(true);
}

static void perform_restart(struct work_struct *dummy)
{
	orderly_reboot();
}

/*
 * Perform the shutdown operation in a thread context.
 */
static DECLARE_WORK(shutdown_work, perform_shutdown);

/*
 * Perform the restart operation in a thread context.
 */
static DECLARE_WORK(restart_work, perform_restart);

static void shutdown_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	struct work_struct *work = NULL;
	u32 recvlen;
	u64 requestid;
	u8  *shut_txf_buf = util_shutdown.recv_buffer;

	struct shutdown_msg_data *shutdown_msg;

	struct icmsg_hdr *icmsghdrp;

	vmbus_recvpacket(channel, shut_txf_buf,
			 HV_HYP_PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
					fw_versions, FW_VER_COUNT,
					sd_versions, SD_VER_COUNT,
					NULL, &sd_srv_version)) {
				pr_info("Shutdown IC version %d.%d\n",
					sd_srv_version >> 16,
					sd_srv_version & 0xFFFF);
			}
		} else {
			shutdown_msg =
				(struct shutdown_msg_data *)&shut_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];

			/*
			 * shutdown_msg->flags can be 0 (shut down), 2 (reboot)
			 * or 4 (hibernate). It may be bitwise-ORed with 1,
			 * which means the request must be performed by force.
			 * Linux always tries to perform the request by force.
			 */
			switch (shutdown_msg->flags) {
			case 0:
			case 1:
				icmsghdrp->status = HV_S_OK;
				work = &shutdown_work;
				pr_info("Shutdown request received -"
					    " graceful shutdown initiated\n");
				break;
			case 2:
			case 3:
				icmsghdrp->status = HV_S_OK;
				work = &restart_work;
				pr_info("Restart request received -"
					    " graceful restart initiated\n");
				break;
			case 4:
			case 5:
				pr_info("Hibernation request received\n");
				icmsghdrp->status = hibernation_supported ?
					HV_S_OK : HV_E_FAIL;
				if (hibernation_supported)
					work = &hibernate_context.work;
				break;
			default:
				icmsghdrp->status = HV_E_FAIL;
				pr_info("Shutdown request received -"
					    " Invalid request\n");
				break;
			}
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, shut_txf_buf,
				       recvlen, requestid,
				       VM_PKT_DATA_INBAND, 0);
	}

	if (work)
		schedule_work(work);
}

/*
 * Set the host time in a process context.
 */
static struct work_struct adj_time_work;

/*
 * The last time sample received from the host. The PTP device responds to
 * requests using this data and the current partition-wide time reference
 * count.
 */
static struct {
	u64				host_time;
	u64				ref_time;
	spinlock_t			lock;
} host_ts;

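/*
 * Hyper-V reference time is counted in 100 ns units from the Windows epoch
 * (1601-01-01). WLTIMEDELTA is the number of 100 ns intervals between that
 * epoch and the Unix epoch: 116444736000000000 * 100 ns == 11644473600 s,
 * i.e. the 369 years from 1601 to 1970. Subtracting it and scaling by 100
 * therefore yields nanoseconds since the Unix epoch.
 */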
static inline u64 reftime_to_ns(u64 reftime)
{
	return (reftime - WLTIMEDELTA) * 100;
}

/*
 * Hard coded threshold for host timesync delay: 600 seconds
 */
static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;

static int hv_get_adj_host_time(struct timespec64 *ts)
{
	u64 newtime, reftime, timediff_adj;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host_ts.lock, flags);
	reftime = hv_read_reference_counter();

	/*
	 * We need to let the caller know that the last update from the host
	 * is older than the maximum allowable threshold. clock_gettime()
	 * and the PTP ioctl do not have a documented error we could return
	 * for this specific case, so use ESTALE to report it.
	 */
	timediff_adj = reftime - host_ts.ref_time;
	if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
		pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
			     (timediff_adj * 100));
		ret = -ESTALE;
	}

	newtime = host_ts.host_time + timediff_adj;
	*ts = ns_to_timespec64(reftime_to_ns(newtime));
	spin_unlock_irqrestore(&host_ts.lock, flags);

	return ret;
}

static void hv_set_host_time(struct work_struct *work)
{
	struct timespec64 ts;

	if (!hv_get_adj_host_time(&ts))
		do_settimeofday64(&ts);
}

/*
 * Synchronize time with host after reboot, restore, etc.
 *
 * The ICTIMESYNCFLAG_SYNC flag bit indicates reboot or restore events of the
 * VM. After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
 * message after the timesync channel is opened. Since the hv_utils module is
 * loaded after hv_vmbus, the first message is usually missed. This bit is
 * considered a hard request to discipline the clock.
 *
 * The ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from the host. This
 * is typically used as a hint to the guest. The guest is under no obligation
 * to discipline the clock.
 */
static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
{
	unsigned long flags;
	u64 cur_reftime;

	/*
	 * Save the adjusted time sample from the host and the snapshot
	 * of the current system time.
	 */
	spin_lock_irqsave(&host_ts.lock, flags);

	cur_reftime = hv_read_reference_counter();
	host_ts.host_time = hosttime;
	host_ts.ref_time = cur_reftime;

	/*
	 * TimeSync v4 messages contain the reference time (the guest's
	 * Hyper-V clocksource reading taken when the time sample was
	 * generated), so we can improve the precision by adding the delta
	 * between now and the time of generation. For older protocols the
	 * caller passes the current reference counter as reftime, making
	 * the adjustment effectively zero.
	 */
	host_ts.host_time += (cur_reftime - reftime);

	spin_unlock_irqrestore(&host_ts.lock, flags);

	/* Schedule work to call do_settimeofday64() */
	if (adj_flags & ICTIMESYNCFLAG_SYNC)
		schedule_work(&adj_time_work);
}

/*
 * Time Sync Channel message handler.
 */
static void timesync_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct ictimesync_data *timedatap;
	struct ictimesync_ref_data *refdata;
	u8 *time_txf_buf = util_timesynch.recv_buffer;

	/*
	 * Drain the ring buffer and use the last packet to update
	 * host_ts
	 */
	while (1) {
		int ret = vmbus_recvpacket(channel, time_txf_buf,
					   HV_HYP_PAGE_SIZE, &recvlen,
					   &requestid);
		if (ret) {
			pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
				     ret);
			break;
		}

		if (!recvlen)
			break;

		icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
				sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
						fw_versions, FW_VER_COUNT,
						ts_versions, TS_VER_COUNT,
						NULL, &ts_srv_version)) {
				pr_info("TimeSync IC version %d.%d\n",
					ts_srv_version >> 16,
					ts_srv_version & 0xFFFF);
			}
		} else {
			if (ts_srv_version > TS_VERSION_3) {
				refdata = (struct ictimesync_ref_data *)
					&time_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];

				adj_guesttime(refdata->parenttime,
						refdata->vmreferencetime,
						refdata->flags);
			} else {
				timedatap = (struct ictimesync_data *)
					&time_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];
				adj_guesttime(timedatap->parenttime,
					      hv_read_reference_counter(),
					      timedatap->flags);
			}
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, time_txf_buf,
				recvlen, requestid,
				VM_PKT_DATA_INBAND, 0);
	}
}

/*
 * Heartbeat functionality.
 * Every two seconds, Hyper-V sends us a heartbeat request message.
 * We respond to this message, and Hyper-V knows we are alive.
 */
static void heartbeat_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct heartbeat_msg_data *heartbeat_msg;
	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;

	while (1) {

		vmbus_recvpacket(channel, hbeat_txf_buf,
				 HV_HYP_PAGE_SIZE, &recvlen, &requestid);

		if (!recvlen)
			break;

		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
				sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp,
					hbeat_txf_buf,
					fw_versions, FW_VER_COUNT,
					hb_versions, HB_VER_COUNT,
					NULL, &hb_srv_version)) {

				pr_info("Heartbeat IC version %d.%d\n",
					hb_srv_version >> 16,
					hb_srv_version & 0xFFFF);
			}
		} else {
			heartbeat_msg =
				(struct heartbeat_msg_data *)&hbeat_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];

			heartbeat_msg->seq_num += 1;
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, hbeat_txf_buf,
				       recvlen, requestid,
				       VM_PKT_DATA_INBAND, 0);
	}
}

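/*
 * Ring buffer sizes for the util channels: three Hyper-V pages (4 KiB each)
 * of payload in each direction; VMBUS_RING_SIZE() adds room for the ring
 * buffer header and rounds the total up to a page multiple.
 */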
#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)

static int util_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct hv_util_service *srv =
		(struct hv_util_service *)dev_id->driver_data;
	int ret;

	srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
	if (!srv->recv_buffer)
		return -ENOMEM;
	srv->channel = dev->channel;
	if (srv->util_init) {
		ret = srv->util_init(srv);
		if (ret) {
			ret = -ENODEV;
			goto error1;
		}
	}

	/*
	 * The set of services managed by the util driver are not performance
	 * critical and do not need batched reading. Furthermore, some services
	 * such as KVP can only handle one message from the host at a time.
	 * Turn off batched reading for all util drivers before we open the
	 * channel.
	 */
	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);

	hv_set_drvdata(dev, srv);

	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
	if (ret)
		goto error;

	return 0;

error:
	if (srv->util_deinit)
		srv->util_deinit();
error1:
	kfree(srv->recv_buffer);
	return ret;
}

static int util_remove(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);

	if (srv->util_deinit)
		srv->util_deinit();
	vmbus_close(dev->channel);
	kfree(srv->recv_buffer);

	return 0;
}

/*
 * When we're in util_suspend(), all the userspace processes have been frozen
 * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
 * after the whole resume procedure, including util_resume(), finishes.
 */
static int util_suspend(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret = 0;

	if (srv->util_pre_suspend) {
		ret = srv->util_pre_suspend();
		if (ret)
			return ret;
	}

	vmbus_close(dev->channel);

	return 0;
}

static int util_resume(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret = 0;

	if (srv->util_pre_resume) {
		ret = srv->util_pre_resume();
		if (ret)
			return ret;
	}

	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
	return ret;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Shutdown guid */
	{ HV_SHUTDOWN_GUID,
	  .driver_data = (unsigned long)&util_shutdown
	},
	/* Time synch guid */
	{ HV_TS_GUID,
	  .driver_data = (unsigned long)&util_timesynch
	},
	/* Heartbeat guid */
	{ HV_HEART_BEAT_GUID,
	  .driver_data = (unsigned long)&util_heartbeat
	},
	/* KVP guid */
	{ HV_KVP_GUID,
	  .driver_data = (unsigned long)&util_kvp
	},
	/* VSS GUID */
	{ HV_VSS_GUID,
	  .driver_data = (unsigned long)&util_vss
	},
	/* File copy GUID */
	{ HV_FCOPY_GUID,
	  .driver_data = (unsigned long)&util_fcopy
	},
	{ },
};

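/*
 * Expose the GUID table to user space so that hv_utils can be autoloaded
 * when VMBus offers one of these channels.
 */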
MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver util_drv = {
	.name = "hv_utils",
	.id_table = id_table,
	.probe =  util_probe,
	.remove =  util_remove,
	.suspend = util_suspend,
	.resume =  util_resume,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

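/*
 * PTP interface: the guest cannot program the host's clock, so every
 * callback other than gettime64 returns -EOPNOTSUPP. gettime64 reports the
 * host time extrapolated by hv_get_adj_host_time(), which user space can
 * read through the registered PTP clock device.
 */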
static int hv_ptp_enable(struct ptp_clock_info *info,
			 struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	return hv_get_adj_host_time(ts);
}

static struct ptp_clock_info ptp_hyperv_info = {
	.name		= "hyperv",
	.enable         = hv_ptp_enable,
	.adjtime        = hv_ptp_adjtime,
	.adjfreq        = hv_ptp_adjfreq,
	.gettime64      = hv_ptp_gettime,
	.settime64      = hv_ptp_settime,
	.owner		= THIS_MODULE,
};

static struct ptp_clock *hv_ptp_clock;

static int hv_timesync_init(struct hv_util_service *srv)
{
	/* TimeSync requires Hyper-V clocksource. */
	if (!hv_read_reference_counter)
		return -ENODEV;

	spin_lock_init(&host_ts.lock);

	INIT_WORK(&adj_time_work, hv_set_host_time);

	/*
	 * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
	 * disabled but the driver is still useful without the PTP device
	 * as it still handles the ICTIMESYNCFLAG_SYNC case.
	 */
	hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
	if (IS_ERR_OR_NULL(hv_ptp_clock)) {
		pr_err("cannot register PTP clock: %d\n",
		       PTR_ERR_OR_ZERO(hv_ptp_clock));
		hv_ptp_clock = NULL;
	}

	return 0;
}

static void hv_timesync_cancel_work(void)
{
	cancel_work_sync(&adj_time_work);
}

static int hv_timesync_pre_suspend(void)
{
	hv_timesync_cancel_work();
	return 0;
}

static void hv_timesync_deinit(void)
{
	if (hv_ptp_clock)
		ptp_clock_unregister(hv_ptp_clock);

	hv_timesync_cancel_work();
}

static int __init init_hyperv_utils(void)
{
	pr_info("Registering HyperV Utility Driver\n");

	return vmbus_driver_register(&util_drv);
}

static void exit_hyperv_utils(void)
{
	pr_info("De-Registered HyperV Utility Driver\n");

	vmbus_driver_unregister(&util_drv);
}

module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);

MODULE_DESCRIPTION("Hyper-V Utilities");
MODULE_LICENSE("GPL");