Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * SVC Greybus driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright 2015 Google Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright 2015 Linaro Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #define SVC_INTF_EJECT_TIMEOUT		9000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #define SVC_INTF_ACTIVATE_TIMEOUT	6000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #define SVC_INTF_RESUME_TIMEOUT		3000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) struct gb_svc_deferred_request {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) 	struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 	struct gb_operation *operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) static int gb_svc_queue_deferred_request(struct gb_operation *operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) static ssize_t endo_id_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 			    struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 	struct gb_svc *svc = to_gb_svc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 	return sprintf(buf, "0x%04x\n", svc->endo_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) static DEVICE_ATTR_RO(endo_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) static ssize_t ap_intf_id_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 			       struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 	struct gb_svc *svc = to_gb_svc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 	return sprintf(buf, "%u\n", svc->ap_intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) static DEVICE_ATTR_RO(ap_intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) // FIXME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) // This is a hack, we need to do this "right" and clean the interface up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) // properly, not just forcibly yank the thing out of the system and hope for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) // best.  But for now, people want their modules to come out without having to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) // throw the thing to the ground or get out a screwdriver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) static ssize_t intf_eject_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 				struct device_attribute *attr, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 				size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 	struct gb_svc *svc = to_gb_svc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	unsigned short intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	ret = kstrtou16(buf, 10, &intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 	dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 	ret = gb_svc_intf_eject(svc, intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) static DEVICE_ATTR_WO(intf_eject);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 			     char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	struct gb_svc *svc = to_gb_svc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	return sprintf(buf, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 		       gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) static ssize_t watchdog_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 			      struct device_attribute *attr, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 			      size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	struct gb_svc *svc = to_gb_svc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	bool user_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	retval = strtobool(buf, &user_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	if (user_request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 		retval = gb_svc_watchdog_enable(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		retval = gb_svc_watchdog_disable(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) static DEVICE_ATTR_RW(watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) static ssize_t watchdog_action_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 				    struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	struct gb_svc *svc = to_gb_svc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 		return sprintf(buf, "panic\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		return sprintf(buf, "reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) static ssize_t watchdog_action_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 				     struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 				     const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	struct gb_svc *svc = to_gb_svc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	if (sysfs_streq(buf, "panic"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 		svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	else if (sysfs_streq(buf, "reset"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 		svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) static DEVICE_ATTR_RW(watchdog_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	struct gb_svc_pwrmon_rail_count_get_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	ret = gb_operation_sync(svc->connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 				GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 		dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	*value = response.rail_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 		struct gb_svc_pwrmon_rail_names_get_response *response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 		size_t bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	ret = gb_operation_sync(svc->connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 				GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 				response, bufsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 		dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	if (response->status != GB_SVC_OP_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 		dev_err(&svc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 			"SVC error while getting rail names: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 			response->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 				    u8 measurement_type, u32 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	struct gb_svc_pwrmon_sample_get_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	struct gb_svc_pwrmon_sample_get_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	request.rail_id = rail_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	request.measurement_type = measurement_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 				&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	if (response.result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 		dev_err(&svc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 			"UniPro error while getting rail power sample (%d %d): %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 			rail_id, measurement_type, response.result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 		switch (response.result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 			return -ENOMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 			return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	*value = le32_to_cpu(response.measurement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 				  u8 measurement_type, u32 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	struct gb_svc_pwrmon_intf_sample_get_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	struct gb_svc_pwrmon_intf_sample_get_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	request.measurement_type = measurement_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	ret = gb_operation_sync(svc->connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 				&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	if (response.result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 		dev_err(&svc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 			"UniPro error while getting intf power sample (%d %d): %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 			intf_id, measurement_type, response.result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		switch (response.result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 			return -ENOMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 			return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	*value = le32_to_cpu(response.measurement);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) static struct attribute *svc_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	&dev_attr_endo_id.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	&dev_attr_ap_intf_id.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	&dev_attr_intf_eject.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	&dev_attr_watchdog.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	&dev_attr_watchdog_action.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) ATTRIBUTE_GROUPS(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	struct gb_svc_intf_device_id_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	request.device_id = device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 				 &request, sizeof(request), NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	struct gb_svc_intf_eject_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	 * The pulse width for module release in svc is long so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	 * increase the timeout so the operation will not return to soon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	ret = gb_operation_sync_timeout(svc->connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 					GB_SVC_TYPE_INTF_EJECT, &request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 					sizeof(request), NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 					SVC_INTF_EJECT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	struct gb_svc_intf_vsys_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	struct gb_svc_intf_vsys_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	int type, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	ret = gb_operation_sync(svc->connection, type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 				&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	if (response.result_code != GB_SVC_INTF_VSYS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 		return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	struct gb_svc_intf_refclk_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	struct gb_svc_intf_refclk_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	int type, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 		type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 		type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	ret = gb_operation_sync(svc->connection, type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 				&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	if (response.result_code != GB_SVC_INTF_REFCLK_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	struct gb_svc_intf_unipro_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	struct gb_svc_intf_unipro_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	int type, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 		type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	ret = gb_operation_sync(svc->connection, type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 				&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 	if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 		return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	struct gb_svc_intf_activate_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	struct gb_svc_intf_activate_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	ret = gb_operation_sync_timeout(svc->connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 					GB_SVC_TYPE_INTF_ACTIVATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 					&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 					&response, sizeof(response),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 					SVC_INTF_ACTIVATE_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	if (response.status != GB_SVC_OP_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 			intf_id, response.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 		return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	*intf_type = response.intf_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	struct gb_svc_intf_resume_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	struct gb_svc_intf_resume_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	ret = gb_operation_sync_timeout(svc->connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 					GB_SVC_TYPE_INTF_RESUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 					&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 					&response, sizeof(response),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 					SVC_INTF_RESUME_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 		dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 			intf_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	if (response.status != GB_SVC_OP_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 		dev_err(&svc->dev, "failed to resume interface %u: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 			intf_id, response.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 		return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 			u32 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	struct gb_svc_dme_peer_get_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	struct gb_svc_dme_peer_get_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	u16 result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	request.attr = cpu_to_le16(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	request.selector = cpu_to_le16(selector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 				&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 			intf_id, attr, selector, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	result = le16_to_cpu(response.result_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 			intf_id, attr, selector, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	if (value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		*value = le32_to_cpu(response.attr_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 			u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	struct gb_svc_dme_peer_set_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	struct gb_svc_dme_peer_set_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	u16 result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	request.attr = cpu_to_le16(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	request.selector = cpu_to_le16(selector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	request.value = cpu_to_le32(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 				&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 			intf_id, attr, selector, value, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	result = le16_to_cpu(response.result_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 			intf_id, attr, selector, value, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		return -EREMOTEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) int gb_svc_connection_create(struct gb_svc *svc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 			     u8 intf1_id, u16 cport1_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 			     u8 intf2_id, u16 cport2_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 			     u8 cport_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	struct gb_svc_conn_create_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	request.intf1_id = intf1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	request.cport1_id = cpu_to_le16(cport1_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	request.intf2_id = intf2_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	request.cport2_id = cpu_to_le16(cport2_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	request.tc = 0;		/* TC0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	request.flags = cport_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 				 &request, sizeof(request), NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 			       u8 intf2_id, u16 cport2_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	struct gb_svc_conn_destroy_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	struct gb_connection *connection = svc->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	request.intf1_id = intf1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	request.cport1_id = cpu_to_le16(cport1_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	request.intf2_id = intf2_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	request.cport2_id = cpu_to_le16(cport2_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 				&request, sizeof(request), NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 			intf1_id, cport1_id, intf2_id, cport2_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) /* Creates bi-directional routes between the devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 			u8 intf2_id, u8 dev2_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	struct gb_svc_route_create_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	request.intf1_id = intf1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	request.dev1_id = dev1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	request.intf2_id = intf2_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	request.dev2_id = dev2_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 				 &request, sizeof(request), NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) /* Destroys bi-directional routes between the devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	struct gb_svc_route_destroy_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	request.intf1_id = intf1_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	request.intf2_id = intf2_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 				&request, sizeof(request), NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 			intf1_id, intf2_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 			       u8 tx_amplitude, u8 tx_hs_equalizer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 			       u8 flags, u32 quirks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 			       struct gb_svc_l2_timer_cfg *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 			       struct gb_svc_l2_timer_cfg *remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	struct gb_svc_intf_set_pwrm_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	struct gb_svc_intf_set_pwrm_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	u16 result_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	memset(&request, 0, sizeof(request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	request.hs_series = hs_series;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	request.tx_mode = tx_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	request.tx_gear = tx_gear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	request.tx_nlanes = tx_nlanes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	request.tx_amplitude = tx_amplitude;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	request.tx_hs_equalizer = tx_hs_equalizer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	request.rx_mode = rx_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	request.rx_gear = rx_gear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	request.rx_nlanes = rx_nlanes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	request.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	request.quirks = cpu_to_le32(quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	if (local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		request.local_l2timerdata = *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	if (remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		request.remote_l2timerdata = *remote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 				&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	result_code = response.result_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		dev_err(&svc->dev, "set power mode = %d\n", result_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	struct gb_svc_intf_set_pwrm_request request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	struct gb_svc_intf_set_pwrm_response response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	u16 result_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	memset(&request, 0, sizeof(request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	request.intf_id = intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 				&request, sizeof(request),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 				&response, sizeof(response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		dev_err(&svc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			"failed to send set power mode operation to interface %u: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 			intf_id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	result_code = response.result_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	if (result_code != GB_SVC_SETPWRM_PWR_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		dev_err(&svc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			"failed to hibernate the link for interface %u: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			intf_id, result_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) int gb_svc_ping(struct gb_svc *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 					 NULL, 0, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) static int gb_svc_version_request(struct gb_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	struct gb_connection *connection = op->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	struct gb_svc *svc = gb_connection_get_data(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	struct gb_svc_version_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	struct gb_svc_version_response *response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	if (op->request->payload_size < sizeof(*request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 			op->request->payload_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 			sizeof(*request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	request = op->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	if (request->major > GB_SVC_VERSION_MAJOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			 request->major, GB_SVC_VERSION_MAJOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	svc->protocol_major = request->major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	svc->protocol_minor = request->minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	response = op->response->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	response->major = svc->protocol_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	response->minor = svc->protocol_minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 					size_t len, loff_t *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		file_inode(file)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	struct gb_svc *svc = pwrmon_rails->svc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	int ret, desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	char buff[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 				       GB_SVC_PWRMON_TYPE_VOL, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		dev_err(&svc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			"failed to get voltage sample %u: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 			pwrmon_rails->id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	desc = scnprintf(buff, sizeof(buff), "%u\n", value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	return simple_read_from_buffer(buf, len, offset, buff, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 					size_t len, loff_t *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		file_inode(file)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	struct gb_svc *svc = pwrmon_rails->svc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	int ret, desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	char buff[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 				       GB_SVC_PWRMON_TYPE_CURR, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		dev_err(&svc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			"failed to get current sample %u: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			pwrmon_rails->id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	desc = scnprintf(buff, sizeof(buff), "%u\n", value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	return simple_read_from_buffer(buf, len, offset, buff, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 				      size_t len, loff_t *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		file_inode(file)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	struct gb_svc *svc = pwrmon_rails->svc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	int ret, desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	char buff[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 				       GB_SVC_PWRMON_TYPE_PWR, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			pwrmon_rails->id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	desc = scnprintf(buff, sizeof(buff), "%u\n", value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	return simple_read_from_buffer(buf, len, offset, buff, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) static const struct file_operations pwrmon_debugfs_voltage_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	.read		= pwr_debugfs_voltage_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) static const struct file_operations pwrmon_debugfs_current_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	.read		= pwr_debugfs_current_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) static const struct file_operations pwrmon_debugfs_power_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	.read		= pwr_debugfs_power_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	size_t bufsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct dentry *dent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	u8 rail_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	if (IS_ERR_OR_NULL(dent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		goto err_pwrmon_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		goto err_pwrmon_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	bufsize = sizeof(*rail_names) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	rail_names = kzalloc(bufsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	if (!rail_names)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		goto err_pwrmon_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 				    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	if (!svc->pwrmon_rails)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		goto err_pwrmon_debugfs_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		goto err_pwrmon_debugfs_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	for (i = 0; i < rail_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		struct dentry *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		snprintf(fname, sizeof(fname), "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			 (char *)&rail_names->name[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		rail->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		rail->svc = svc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		dir = debugfs_create_dir(fname, dent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		debugfs_create_file("voltage_now", 0444, dir, rail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 				    &pwrmon_debugfs_voltage_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		debugfs_create_file("current_now", 0444, dir, rail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 				    &pwrmon_debugfs_current_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		debugfs_create_file("power_now", 0444, dir, rail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 				    &pwrmon_debugfs_power_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	kfree(rail_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) err_pwrmon_debugfs_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	kfree(rail_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	kfree(svc->pwrmon_rails);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	svc->pwrmon_rails = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) err_pwrmon_debugfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	debugfs_remove(dent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) static void gb_svc_debugfs_init(struct gb_svc *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 						 gb_debugfs_get());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	gb_svc_pwrmon_debugfs_init(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) static void gb_svc_debugfs_exit(struct gb_svc *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	debugfs_remove_recursive(svc->debugfs_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	kfree(svc->pwrmon_rails);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	svc->pwrmon_rails = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) static int gb_svc_hello(struct gb_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct gb_connection *connection = op->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	struct gb_svc *svc = gb_connection_get_data(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	struct gb_svc_hello_request *hello_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if (op->request->payload_size < sizeof(*hello_request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			 op->request->payload_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			 sizeof(*hello_request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	hello_request = op->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	svc->endo_id = le16_to_cpu(hello_request->endo_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	svc->ap_intf_id = hello_request->interface_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	ret = device_add(&svc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	ret = gb_svc_watchdog_create(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		goto err_unregister_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	gb_svc_debugfs_init(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	ret = gb_svc_queue_deferred_request(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		goto err_remove_debugfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) err_remove_debugfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	gb_svc_debugfs_exit(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) err_unregister_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	gb_svc_watchdog_destroy(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	device_del(&svc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 						    u8 intf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct gb_host_device *hd = svc->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct gb_module *module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	size_t num_interfaces;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	u8 module_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	list_for_each_entry(module, &hd->modules, hd_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		module_id = module->module_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		num_interfaces = module->num_interfaces;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		if (intf_id >= module_id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		    intf_id < module_id + num_interfaces) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			return module->interfaces[intf_id - module_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	struct gb_host_device *hd = svc->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	struct gb_module *module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	list_for_each_entry(module, &hd->modules, hd_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		if (module->module_id == module_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			return module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) static void gb_svc_process_hello_deferred(struct gb_operation *operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	struct gb_connection *connection = operation->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	struct gb_svc *svc = gb_connection_get_data(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	 * module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	 * The code should be removed once SW-2217, Heuristic for UniPro
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 * Power Mode Changes is resolved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 					 GB_SVC_UNIPRO_HS_SERIES_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 					 2, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 					 GB_SVC_SMALL_AMPLITUDE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 					 GB_SVC_NO_DE_EMPHASIS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 					 2, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 					 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 					 NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		dev_warn(&svc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			 "power mode change failed on AP to switch link: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			 ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) static void gb_svc_process_module_inserted(struct gb_operation *operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	struct gb_svc_module_inserted_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	struct gb_connection *connection = operation->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	struct gb_svc *svc = gb_connection_get_data(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct gb_host_device *hd = svc->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct gb_module *module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	size_t num_interfaces;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	u8 module_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	u16 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	/* The request message size has already been verified. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	request = operation->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	module_id = request->primary_intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	num_interfaces = request->intf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	flags = le16_to_cpu(request->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		__func__, module_id, num_interfaces, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			 module_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	module = gb_svc_module_lookup(svc, module_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	if (module) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			 module_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	module = gb_module_create(hd, module_id, num_interfaces);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (!module) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		dev_err(&svc->dev, "failed to create module\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	ret = gb_module_add(module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		gb_module_put(module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	list_add(&module->hd_node, &hd->modules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) static void gb_svc_process_module_removed(struct gb_operation *operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	struct gb_svc_module_removed_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct gb_connection *connection = operation->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct gb_svc *svc = gb_connection_get_data(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	struct gb_module *module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	u8 module_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/* The request message size has already been verified. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	request = operation->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	module_id = request->primary_intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	module = gb_svc_module_lookup(svc, module_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	if (!module) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			 module_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	module->disconnected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	gb_module_del(module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	list_del(&module->hd_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	gb_module_put(module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static void gb_svc_process_intf_oops(struct gb_operation *operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	struct gb_svc_intf_oops_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	struct gb_connection *connection = operation->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	struct gb_svc *svc = gb_connection_get_data(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	struct gb_interface *intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	u8 intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	u8 reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	/* The request message size has already been verified. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	request = operation->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	intf_id = request->intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	reason = request->reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	intf = gb_svc_interface_lookup(svc, intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (!intf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			 intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		 intf_id, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	mutex_lock(&intf->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	intf->disconnected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	gb_interface_disable(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	gb_interface_deactivate(intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	mutex_unlock(&intf->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	struct gb_svc_intf_mailbox_event_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	struct gb_connection *connection = operation->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	struct gb_svc *svc = gb_connection_get_data(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	struct gb_interface *intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	u8 intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	u16 result_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	u32 mailbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	/* The request message size has already been verified. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	request = operation->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	intf_id = request->intf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	result_code = le16_to_cpu(request->result_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	mailbox = le32_to_cpu(request->mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		__func__, intf_id, result_code, mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	intf = gb_svc_interface_lookup(svc, intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (!intf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	gb_interface_mailbox_event(intf, result_code, mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static void gb_svc_process_deferred_request(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	struct gb_svc_deferred_request *dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	struct gb_operation *operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct gb_svc *svc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	dr = container_of(work, struct gb_svc_deferred_request, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	operation = dr->operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	svc = gb_connection_get_data(operation->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	type = operation->request->header->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	case GB_SVC_TYPE_SVC_HELLO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		gb_svc_process_hello_deferred(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	case GB_SVC_TYPE_MODULE_INSERTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		gb_svc_process_module_inserted(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	case GB_SVC_TYPE_MODULE_REMOVED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		gb_svc_process_module_removed(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		gb_svc_process_intf_mailbox_event(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	case GB_SVC_TYPE_INTF_OOPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		gb_svc_process_intf_oops(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	gb_operation_put(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	kfree(dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static int gb_svc_queue_deferred_request(struct gb_operation *operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	struct gb_svc *svc = gb_connection_get_data(operation->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	struct gb_svc_deferred_request *dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	if (!dr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	gb_operation_get(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	dr->operation = operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	INIT_WORK(&dr->work, gb_svc_process_deferred_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	queue_work(svc->wq, &dr->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static int gb_svc_intf_reset_recv(struct gb_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	struct gb_svc *svc = gb_connection_get_data(op->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	struct gb_message *request = op->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	struct gb_svc_intf_reset_request *reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	if (request->payload_size < sizeof(*reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			 request->payload_size, sizeof(*reset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	reset = request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	/* FIXME Reset the interface here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static int gb_svc_module_inserted_recv(struct gb_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	struct gb_svc *svc = gb_connection_get_data(op->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	struct gb_svc_module_inserted_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	if (op->request->payload_size < sizeof(*request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			 op->request->payload_size, sizeof(*request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	request = op->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		request->primary_intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	return gb_svc_queue_deferred_request(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static int gb_svc_module_removed_recv(struct gb_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	struct gb_svc *svc = gb_connection_get_data(op->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	struct gb_svc_module_removed_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	if (op->request->payload_size < sizeof(*request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			 op->request->payload_size, sizeof(*request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	request = op->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		request->primary_intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	return gb_svc_queue_deferred_request(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static int gb_svc_intf_oops_recv(struct gb_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	struct gb_svc *svc = gb_connection_get_data(op->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	struct gb_svc_intf_oops_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	if (op->request->payload_size < sizeof(*request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			 op->request->payload_size, sizeof(*request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	return gb_svc_queue_deferred_request(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	struct gb_svc *svc = gb_connection_get_data(op->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	struct gb_svc_intf_mailbox_event_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	if (op->request->payload_size < sizeof(*request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			 op->request->payload_size, sizeof(*request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	request = op->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	return gb_svc_queue_deferred_request(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static int gb_svc_request_handler(struct gb_operation *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	struct gb_connection *connection = op->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	struct gb_svc *svc = gb_connection_get_data(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	u8 type = op->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	 * SVC requests need to follow a specific order (at least initially) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	 * below code takes care of enforcing that. The expected order is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	 * - PROTOCOL_VERSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 * - SVC_HELLO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 * - Any other request, but the earlier two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	 * Incoming requests are guaranteed to be serialized and so we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	 * need to protect 'state' for any races.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	case GB_SVC_TYPE_PROTOCOL_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		if (svc->state != GB_SVC_STATE_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	case GB_SVC_TYPE_SVC_HELLO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		if (svc->state != GB_SVC_STATE_SVC_HELLO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			 type, svc->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	case GB_SVC_TYPE_PROTOCOL_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		ret = gb_svc_version_request(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	case GB_SVC_TYPE_SVC_HELLO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		ret = gb_svc_hello(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			svc->state = GB_SVC_STATE_SVC_HELLO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	case GB_SVC_TYPE_INTF_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		return gb_svc_intf_reset_recv(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	case GB_SVC_TYPE_MODULE_INSERTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		return gb_svc_module_inserted_recv(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	case GB_SVC_TYPE_MODULE_REMOVED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		return gb_svc_module_removed_recv(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		return gb_svc_intf_mailbox_event_recv(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	case GB_SVC_TYPE_INTF_OOPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		return gb_svc_intf_oops_recv(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static void gb_svc_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	struct gb_svc *svc = to_gb_svc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (svc->connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		gb_connection_destroy(svc->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	ida_destroy(&svc->device_id_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	destroy_workqueue(svc->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	kfree(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct device_type greybus_svc_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	.name		= "greybus_svc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	.release	= gb_svc_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) struct gb_svc *gb_svc_create(struct gb_host_device *hd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	struct gb_svc *svc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (!svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (!svc->wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		kfree(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	svc->dev.parent = &hd->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	svc->dev.bus = &greybus_bus_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	svc->dev.type = &greybus_svc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	svc->dev.groups = svc_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	svc->dev.dma_mask = svc->dev.parent->dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	device_initialize(&svc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	ida_init(&svc->device_id_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	svc->state = GB_SVC_STATE_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	svc->hd = hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 						      gb_svc_request_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	if (IS_ERR(svc->connection)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		dev_err(&svc->dev, "failed to create connection: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			PTR_ERR(svc->connection));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		goto err_put_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	gb_connection_set_data(svc->connection, svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	return svc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) err_put_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	put_device(&svc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) int gb_svc_add(struct gb_svc *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	 * The SVC protocol is currently driven by the SVC, so the SVC device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	 * is added from the connection request handler when enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	 * information has been received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	ret = gb_connection_enable(svc->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) static void gb_svc_remove_modules(struct gb_svc *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	struct gb_host_device *hd = svc->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	struct gb_module *module, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		gb_module_del(module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		list_del(&module->hd_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		gb_module_put(module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) void gb_svc_del(struct gb_svc *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	gb_connection_disable_rx(svc->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	 * The SVC device may have been registered from the request handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	if (device_is_registered(&svc->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		gb_svc_debugfs_exit(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		gb_svc_watchdog_destroy(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		device_del(&svc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	flush_workqueue(svc->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	gb_svc_remove_modules(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	gb_connection_disable(svc->connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) void gb_svc_put(struct gb_svc *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	put_device(&svc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }