^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright 2017 NXP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Dong Aisheng <aisheng.dong@nxp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/clk-provider.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) struct clk_bulk_data *clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) for (i = 0; i < num_clks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) clks[i].id = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) clks[i].clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) for (i = 0; i < num_clks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) of_property_read_string_index(np, "clock-names", i, &clks[i].id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) clks[i].clk = of_clk_get(np, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) if (IS_ERR(clks[i].clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) ret = PTR_ERR(clks[i].clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) np, i, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) clks[i].clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) clk_bulk_put(i, clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static int __must_check of_clk_bulk_get_all(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) struct clk_bulk_data **clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) struct clk_bulk_data *clk_bulk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) int num_clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) num_clks = of_clk_get_parent_count(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) if (!num_clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) if (!clk_bulk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) ret = of_clk_bulk_get(np, num_clks, clk_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) kfree(clk_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) *clks = clk_bulk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) return num_clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) while (--num_clks >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) clk_put(clks[num_clks].clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) clks[num_clks].clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) EXPORT_SYMBOL_GPL(clk_bulk_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static int __clk_bulk_get(struct device *dev, int num_clks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) struct clk_bulk_data *clks, bool optional)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) for (i = 0; i < num_clks; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) clks[i].clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) for (i = 0; i < num_clks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) clks[i].clk = clk_get(dev, clks[i].id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (IS_ERR(clks[i].clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) ret = PTR_ERR(clks[i].clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) clks[i].clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) if (ret == -ENOENT && optional)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) dev_err(dev, "Failed to get clk '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) clks[i].id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) clk_bulk_put(i, clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
/*
 * clk_bulk_get - obtain references to a set of clocks, by consumer id.
 * A clock that cannot be found is treated as an error; see
 * clk_bulk_get_optional() for the lenient variant.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks)
{
	return __clk_bulk_get(dev, num_clks, clks, false);
}
EXPORT_SYMBOL(clk_bulk_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
/*
 * clk_bulk_get_optional - obtain references to a set of optional clocks.
 * Behaves like clk_bulk_get() except that a clock which does not exist
 * (-ENOENT) is skipped, leaving its handle NULL instead of failing.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks)
{
	return __clk_bulk_get(dev, num_clks, clks, true);
}
EXPORT_SYMBOL_GPL(clk_bulk_get_optional);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
/*
 * clk_bulk_put_all - release a table obtained from clk_bulk_get_all().
 *
 * Drops every clock reference and frees the table itself.  Silently
 * tolerates a NULL or error-pointer table, matching the possible
 * outcomes of clk_bulk_get_all().
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks)
{
	if (IS_ERR_OR_NULL(clks))
		return;

	clk_bulk_put(num_clks, clks);
	kfree(clks);
}
EXPORT_SYMBOL(clk_bulk_put_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) int __must_check clk_bulk_get_all(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) struct clk_bulk_data **clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) struct device_node *np = dev_of_node(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) return of_clk_bulk_get_all(np, clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) EXPORT_SYMBOL(clk_bulk_get_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #ifdef CONFIG_HAVE_CLK_PREPARE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
/**
 * clk_bulk_unprepare - undo preparation of a set of clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being unprepared
 *
 * clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
 */
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
{
	while (--num_clks >= 0)
		clk_unprepare(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_unprepare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * clk_bulk_prepare - prepare a set of clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * @num_clks: the number of clk_bulk_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * @clks: the clk_bulk_data table being prepared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * clk_bulk_prepare may sleep, which differentiates it from clk_bulk_enable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * Returns 0 on success, -EERROR otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) int __must_check clk_bulk_prepare(int num_clks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) const struct clk_bulk_data *clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) for (i = 0; i < num_clks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) ret = clk_prepare(clks[i].clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) pr_err("Failed to prepare clk '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) clks[i].id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) clk_bulk_unprepare(i, clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) EXPORT_SYMBOL_GPL(clk_bulk_prepare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) #endif /* CONFIG_HAVE_CLK_PREPARE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * clk_bulk_disable - gate a set of clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * @num_clks: the number of clk_bulk_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * @clks: the clk_bulk_data table being gated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * clk_bulk_disable must not sleep, which differentiates it from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * clk_bulk_unprepare. clk_bulk_disable must be called before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * clk_bulk_unprepare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) while (--num_clks >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) clk_disable(clks[num_clks].clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) EXPORT_SYMBOL_GPL(clk_bulk_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * clk_bulk_enable - ungate a set of clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) * @num_clks: the number of clk_bulk_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) * @clks: the clk_bulk_data table being ungated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) * clk_bulk_enable must not sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * Returns 0 on success, -EERROR otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) for (i = 0; i < num_clks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) ret = clk_enable(clks[i].clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) pr_err("Failed to enable clk '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) clks[i].id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) clk_bulk_disable(i, clks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) EXPORT_SYMBOL_GPL(clk_bulk_enable);