Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

(Blame: every line below was last touched in commit 8f3ce5b39 by kx, 2023-10-28 12:00:06 +0300.)

// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus bundles
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/greybus.h>
#include "greybus_trace.h"

static ssize_t bundle_class_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        return sprintf(buf, "0x%02x\n", bundle->class);
}
static DEVICE_ATTR_RO(bundle_class);

static ssize_t bundle_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        return sprintf(buf, "%u\n", bundle->id);
}
static DEVICE_ATTR_RO(bundle_id);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        if (!bundle->state)
                return sprintf(buf, "\n");

        return sprintf(buf, "%s\n", bundle->state);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t size)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        kfree(bundle->state);
        bundle->state = kstrdup(buf, GFP_KERNEL);
        if (!bundle->state)
                return -ENOMEM;

        /* Tell userspace that the file contents changed */
        sysfs_notify(&bundle->dev.kobj, NULL, "state");

        return size;
}
static DEVICE_ATTR_RW(state);

static struct attribute *bundle_attrs[] = {
        &dev_attr_bundle_class.attr,
        &dev_attr_bundle_id.attr,
        &dev_attr_state.attr,
        NULL,
};

ATTRIBUTE_GROUPS(bundle);

static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
                                        u8 bundle_id)
{
        struct gb_bundle *bundle;

        list_for_each_entry(bundle, &intf->bundles, links) {
                if (bundle->id == bundle_id)
                        return bundle;
        }

        return NULL;
}

static void gb_bundle_release(struct device *dev)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);

        trace_gb_bundle_release(bundle);

        kfree(bundle->state);
        kfree(bundle->cport_desc);
        kfree(bundle);
}

#ifdef CONFIG_PM
static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
{
        struct gb_connection *connection;

        list_for_each_entry(connection, &bundle->connections, bundle_links)
                gb_connection_disable(connection);
}

static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
{
        struct gb_connection *connection;

        list_for_each_entry(connection, &bundle->connections, bundle_links)
                gb_connection_enable(connection);
}

static int gb_bundle_suspend(struct device *dev)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);
        const struct dev_pm_ops *pm = dev->driver->pm;
        int ret;

        if (pm && pm->runtime_suspend) {
                ret = pm->runtime_suspend(&bundle->dev);
                if (ret)
                        return ret;
        } else {
                gb_bundle_disable_all_connections(bundle);
        }

        ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
        if (ret) {
                if (pm && pm->runtime_resume)
                        ret = pm->runtime_resume(dev);
                else
                        gb_bundle_enable_all_connections(bundle);

                return ret;
        }

        return 0;
}

static int gb_bundle_resume(struct device *dev)
{
        struct gb_bundle *bundle = to_gb_bundle(dev);
        const struct dev_pm_ops *pm = dev->driver->pm;
        int ret;

        ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
        if (ret)
                return ret;

        if (pm && pm->runtime_resume) {
                ret = pm->runtime_resume(dev);
                if (ret)
                        return ret;
        } else {
                gb_bundle_enable_all_connections(bundle);
        }

        return 0;
}

static int gb_bundle_idle(struct device *dev)
{
        pm_runtime_mark_last_busy(dev);
        pm_request_autosuspend(dev);

        return 0;
}
#endif

static const struct dev_pm_ops gb_bundle_pm_ops = {
        SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};

struct device_type greybus_bundle_type = {
        .name =         "greybus_bundle",
        .release =      gb_bundle_release,
        .pm =           &gb_bundle_pm_ops,
};

/*
 * Create a gb_bundle structure to represent a discovered
 * bundle.  Returns a pointer to the new bundle or a null
 * pointer if a failure occurs due to memory exhaustion.
 */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
                                   u8 class)
{
        struct gb_bundle *bundle;

        if (bundle_id == BUNDLE_ID_NONE) {
                dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
                return NULL;
        }

        /*
         * Reject any attempt to reuse a bundle id.  We initialize
         * these serially, so there's no need to worry about keeping
         * the interface bundle list locked here.
         */
        if (gb_bundle_find(intf, bundle_id)) {
                dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
                return NULL;
        }

        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
        if (!bundle)
                return NULL;

        bundle->intf = intf;
        bundle->id = bundle_id;
        bundle->class = class;
        INIT_LIST_HEAD(&bundle->connections);

        bundle->dev.parent = &intf->dev;
        bundle->dev.bus = &greybus_bus_type;
        bundle->dev.type = &greybus_bundle_type;
        bundle->dev.groups = bundle_groups;
        bundle->dev.dma_mask = intf->dev.dma_mask;
        device_initialize(&bundle->dev);
        dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);

        list_add(&bundle->links, &intf->bundles);

        trace_gb_bundle_create(bundle);

        return bundle;
}

int gb_bundle_add(struct gb_bundle *bundle)
{
        int ret;

        ret = device_add(&bundle->dev);
        if (ret) {
                dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
                return ret;
        }

        trace_gb_bundle_add(bundle);

        return 0;
}

/*
 * Tear down a previously set up bundle.
 */
void gb_bundle_destroy(struct gb_bundle *bundle)
{
        trace_gb_bundle_destroy(bundle);

        if (device_is_registered(&bundle->dev))
                device_del(&bundle->dev);

        list_del(&bundle->links);

        put_device(&bundle->dev);
}
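
For reference, below is a minimal usage sketch of the lifecycle these helpers expose. It is not part of bundle.c: in the Greybus core the manifest parser and interface code drive gb_bundle_create(), gb_bundle_add() and gb_bundle_destroy(); the example_setup_bundle() wrapper and its parameters are hypothetical and only illustrate the expected call order and error handling.

static int example_setup_bundle(struct gb_interface *intf, u8 id, u8 class)
{
        struct gb_bundle *bundle;
        int ret;

        /* Allocate the bundle, initialize its device and link it to intf. */
        bundle = gb_bundle_create(intf, id, class);
        if (!bundle)
                return -ENOMEM;

        /* Publish it on the greybus bus so a bundle driver can bind. */
        ret = gb_bundle_add(bundle);
        if (ret) {
                /* Unregister if needed, unlink and drop the last reference. */
                gb_bundle_destroy(bundle);
                return ret;
        }

        return 0;
}

Note the ownership model: once device_initialize() has run inside gb_bundle_create(), the structure is freed only by gb_bundle_release() after put_device() in gb_bundle_destroy() drops the final reference, so callers never kfree() a bundle directly.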