// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>

#include "ipa.h"
#include "ipa_clock.h"
#include "ipa_modem.h"

/**
 * DOC: IPA Clocking
 *
 * The "IPA Clock" manages both the IPA core clock and the interconnects
 * (buses) the IPA depends on as a single logical entity. A reference count
 * is incremented by "get" operations and decremented by "put" operations.
 * Transitions of that count from 0 to 1 result in the clock and interconnects
 * being enabled, and transitions of the count from 1 to 0 cause them to be
 * disabled. We currently operate the core clock at a fixed clock rate, and
 * all buses at a fixed average and peak bandwidth. As more advanced IPA
 * features are enabled, we can make better use of clock and bus scaling.
 *
 * An IPA clock reference must be held for any access to IPA hardware.
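 *
 * A typical caller brackets hardware access with a get/put pair
 * (illustrative sketch only; "ipa" stands for the driver's struct ipa
 * pointer established at probe time):
 *
 *	ipa_clock_get(ipa);	// clock and interconnects now enabled
 *	... access IPA hardware ...
 *	ipa_clock_put(ipa);	// last put disables them again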
 */

#define IPA_CORE_CLOCK_RATE	(75UL * 1000 * 1000)	/* Hz */

/* Interconnect path bandwidths (each times 1000 bytes per second) */
#define IPA_MEMORY_AVG		(80 * 1000)	/* 80 MBps */
#define IPA_MEMORY_PEAK		(600 * 1000)

#define IPA_IMEM_AVG		(80 * 1000)
#define IPA_IMEM_PEAK		(350 * 1000)

#define IPA_CONFIG_AVG		(40 * 1000)
#define IPA_CONFIG_PEAK		(40 * 1000)

/**
 * struct ipa_clock - IPA clocking information
 * @count: Clocking reference count
 * @mutex: Protects clock enable/disable
 * @core: IPA core clock
 * @memory_path: Memory interconnect
 * @imem_path: Internal memory interconnect
 * @config_path: Configuration space interconnect
 */
struct ipa_clock {
	refcount_t count;
	struct mutex mutex; /* protects clock enable/disable */
	struct clk *core;
	struct icc_path *memory_path;
	struct icc_path *imem_path;
	struct icc_path *config_path;
};

static struct icc_path *
ipa_interconnect_init_one(struct device *dev, const char *name)
{
	struct icc_path *path;

	path = of_icc_get(dev, name);
	if (IS_ERR(path))
		dev_err(dev, "error %ld getting %s interconnect\n",
			PTR_ERR(path), name);

	return path;
}

/* Initialize interconnects required for IPA operation */
static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
{
	struct icc_path *path;

	path = ipa_interconnect_init_one(dev, "memory");
	if (IS_ERR(path))
		goto err_return;
	clock->memory_path = path;

	path = ipa_interconnect_init_one(dev, "imem");
	if (IS_ERR(path))
		goto err_memory_path_put;
	clock->imem_path = path;

	path = ipa_interconnect_init_one(dev, "config");
	if (IS_ERR(path))
		goto err_imem_path_put;
	clock->config_path = path;

	return 0;

err_imem_path_put:
	icc_put(clock->imem_path);
err_memory_path_put:
	icc_put(clock->memory_path);
err_return:
	return PTR_ERR(path);
}

/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_clock *clock)
{
	icc_put(clock->config_path);
	icc_put(clock->imem_path);
	icc_put(clock->memory_path);
}

/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
	struct ipa_clock *clock = ipa->clock;
	int ret;

	ret = icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
	if (ret)
		return ret;

	ret = icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
	if (ret)
		goto err_memory_path_disable;

	ret = icc_set_bw(clock->config_path, IPA_CONFIG_AVG, IPA_CONFIG_PEAK);
	if (ret)
		goto err_imem_path_disable;

	return 0;

err_imem_path_disable:
	(void)icc_set_bw(clock->imem_path, 0, 0);
err_memory_path_disable:
	(void)icc_set_bw(clock->memory_path, 0, 0);

	return ret;
}

/* To disable an interconnect, we just set its bandwidth to 0 */
static int ipa_interconnect_disable(struct ipa *ipa)
{
	struct ipa_clock *clock = ipa->clock;
	int ret;

	ret = icc_set_bw(clock->memory_path, 0, 0);
	if (ret)
		return ret;

	ret = icc_set_bw(clock->imem_path, 0, 0);
	if (ret)
		goto err_memory_path_reenable;

	ret = icc_set_bw(clock->config_path, 0, 0);
	if (ret)
		goto err_imem_path_reenable;

	return 0;

err_imem_path_reenable:
	(void)icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
err_memory_path_reenable:
	(void)icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);

	return ret;
}

/* Turn on IPA clocks, including interconnects */
static int ipa_clock_enable(struct ipa *ipa)
{
	int ret;

	ret = ipa_interconnect_enable(ipa);
	if (ret)
		return ret;

	ret = clk_prepare_enable(ipa->clock->core);
	if (ret)
		ipa_interconnect_disable(ipa);

	return ret;
}

/* Inverse of ipa_clock_enable() */
static void ipa_clock_disable(struct ipa *ipa)
{
	clk_disable_unprepare(ipa->clock->core);
	(void)ipa_interconnect_disable(ipa);
}

/* Get an IPA clock reference, but only if the reference count is
 * already non-zero. Returns true if the additional reference was
 * added successfully, or false otherwise.
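 *
 * Because this never acquires the mutex it does not sleep, so it can
 * be used by callers that must not block and only need a reference
 * when the hardware is already powered.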
 */
bool ipa_clock_get_additional(struct ipa *ipa)
{
	return refcount_inc_not_zero(&ipa->clock->count);
}

/* Get an IPA clock reference. If the reference count is non-zero, it is
 * incremented and return is immediate. Otherwise it is checked again
 * under protection of the mutex, and if appropriate the IPA clock
 * is enabled.
 *
 * Incrementing the reference count is intentionally deferred until
 * after the clock is running, so the count is only ever seen as
 * non-zero once the hardware is actually usable.
 */
void ipa_clock_get(struct ipa *ipa)
{
	struct ipa_clock *clock = ipa->clock;
	int ret;

	/* If the clock is running, just bump the reference count */
	if (ipa_clock_get_additional(ipa))
		return;

	/* Otherwise get the mutex and check again */
	mutex_lock(&clock->mutex);

	/* A reference might have been added before we got the mutex. */
	if (ipa_clock_get_additional(ipa))
		goto out_mutex_unlock;

	ret = ipa_clock_enable(ipa);
	if (ret) {
		dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret);
		goto out_mutex_unlock;
	}

	refcount_set(&clock->count, 1);

out_mutex_unlock:
	mutex_unlock(&clock->mutex);
}

/* Attempt to remove an IPA clock reference. If this represents the
 * last reference, disable the IPA clock under protection of the mutex.
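 *
 * refcount_dec_and_mutex_lock() acquires the mutex only when the count
 * drops to zero, so only the final put pays for taking the lock.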
 */
void ipa_clock_put(struct ipa *ipa)
{
	struct ipa_clock *clock = ipa->clock;

	/* If this is not the last reference there's nothing more to do */
	if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
		return;

	ipa_clock_disable(ipa);

	mutex_unlock(&clock->mutex);
}

/* Return the current IPA core clock rate */
u32 ipa_clock_rate(struct ipa *ipa)
{
	return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0;
}

/* Initialize IPA clocking */
struct ipa_clock *ipa_clock_init(struct device *dev)
{
	struct ipa_clock *clock;
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "core");
	if (IS_ERR(clk)) {
		dev_err(dev, "error %ld getting core clock\n", PTR_ERR(clk));
		return ERR_CAST(clk);
	}

	ret = clk_set_rate(clk, IPA_CORE_CLOCK_RATE);
	if (ret) {
		dev_err(dev, "error %d setting core clock rate to %lu\n",
			ret, IPA_CORE_CLOCK_RATE);
		goto err_clk_put;
	}

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		ret = -ENOMEM;
		goto err_clk_put;
	}
	clock->core = clk;

	ret = ipa_interconnect_init(clock, dev);
	if (ret)
		goto err_kfree;

	mutex_init(&clock->mutex);
	refcount_set(&clock->count, 0);

	return clock;

err_kfree:
	kfree(clock);
err_clk_put:
	clk_put(clk);

	return ERR_PTR(ret);
}

/* Inverse of ipa_clock_init() */
void ipa_clock_exit(struct ipa_clock *clock)
{
	struct clk *clk = clock->core;

	WARN_ON(refcount_read(&clock->count) != 0);
	mutex_destroy(&clock->mutex);
	ipa_interconnect_exit(clock);
	kfree(clock);
	clk_put(clk);
}