Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * System Control and Management Interface (SCMI) Clock Protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2018-2020 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include "common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
/*
 * SCMI Clock Protocol message IDs. IDs 0x0-0x2 are the generic
 * PROTOCOL_VERSION/ATTRIBUTES/MESSAGE_ATTRIBUTES commands handled by
 * common code, hence the enumeration starts at 0x3.
 */
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
/* Response payload for PROTOCOL_ATTRIBUTES on the clock protocol. */
struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;	/* number of clock domains exposed */
	u8 max_async_req;	/* max concurrent async rate-set requests */
	u8 reserved;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) struct scmi_msg_resp_clock_attributes {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	__le32 attributes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #define	CLOCK_ENABLE	BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	    u8 name[SCMI_MAX_STR_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
/* Request payload for CLOCK_CONFIG_SET. */
struct scmi_clock_set_config {
	__le32 id;		/* clock domain identifier */
	__le32 attributes;	/* config bits; bit 0 is CLOCK_ENABLE */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
/* Request payload for CLOCK_DESCRIBE_RATES. */
struct scmi_msg_clock_describe_rates {
	__le32 id;		/* clock domain identifier */
	__le32 rate_index;	/* number of rates to skip (already read) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) struct scmi_msg_resp_clock_describe_rates {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	__le32 num_rates_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #define NUM_RETURNED(x)		((x) & 0xfff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #define RATE_DISCRETE(x)	!((x) & BIT(12))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #define NUM_REMAINING(x)	((x) >> 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 		__le32 value_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 		__le32 value_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	} rate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) #define RATE_TO_U64(X)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) ({				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	typeof(X) x = (X);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
/* Request payload for CLOCK_RATE_SET. */
struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;		/* clock domain identifier */
	__le32 value_low;	/* lower 32 bits of the rate in Hz */
	__le32 value_high;	/* upper 32 bits of the rate in Hz */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
/* Per-protocol-instance private state, stashed via ph->set_priv(). */
struct clock_info {
	u32 version;		/* protocol version reported by platform */
	int num_clocks;		/* number of clock domains */
	int max_async_req;	/* platform limit on concurrent async ops */
	atomic_t cur_async_req;	/* in-flight async rate-set request count */
	struct scmi_clock_info *clk;	/* array of num_clocks entries */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 				   struct clock_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	struct scmi_xfer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	struct scmi_msg_resp_clock_protocol_attributes *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 				      0, sizeof(*attr), &t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	attr = t->rx.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	ret = ph->xops->do_xfer(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		ci->num_clocks = le16_to_cpu(attr->num_clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 		ci->max_async_req = attr->max_async_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	ph->xops->xfer_put(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 				     u32 clk_id, struct scmi_clock_info *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	struct scmi_xfer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	struct scmi_msg_resp_clock_attributes *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 				      sizeof(clk_id), sizeof(*attr), &t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	put_unaligned_le32(clk_id, t->tx.buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	attr = t->rx.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	ret = ph->xops->do_xfer(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 		clk->name[0] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	ph->xops->xfer_put(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) static int rate_cmp_func(const void *_r1, const void *_r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	const u64 *r1 = _r1, *r2 = _r2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	if (*r1 < *r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	else if (*r1 == *r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 			      struct scmi_clock_info *clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	u64 *rate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	int ret, cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	bool rate_discrete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	u32 tot_rate_cnt = 0, rates_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	u16 num_returned, num_remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	struct scmi_xfer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	struct scmi_msg_clock_describe_rates *clk_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	struct scmi_msg_resp_clock_describe_rates *rlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 				      sizeof(*clk_desc), 0, &t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	clk_desc = t->tx.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	rlist = t->rx.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 		clk_desc->id = cpu_to_le32(clk_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		/* Set the number of rates to be skipped/already read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		ret = ph->xops->do_xfer(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		rates_flag = le32_to_cpu(rlist->num_rates_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		num_remaining = NUM_REMAINING(rates_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		rate_discrete = RATE_DISCRETE(rates_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		num_returned = NUM_RETURNED(rates_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 			dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		if (!rate_discrete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 			dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 				clk->range.min_rate, clk->range.max_rate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 				clk->range.step_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		rate = &clk->list.rates[tot_rate_cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 			*rate = RATE_TO_U64(rlist->rate[cnt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 			dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 		tot_rate_cnt += num_returned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		ph->xops->reset_rx_to_maxsz(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		 * check for both returned and remaining to avoid infinite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		 * loop due to buggy firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	} while (num_returned && num_remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	if (rate_discrete && rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 		clk->list.num_rates = tot_rate_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 		sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	clk->rate_discrete = rate_discrete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	ph->xops->xfer_put(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 		    u32 clk_id, u64 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	struct scmi_xfer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 				      sizeof(__le32), sizeof(u64), &t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	put_unaligned_le32(clk_id, t->tx.buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	ret = ph->xops->do_xfer(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 		*value = get_unaligned_le64(t->rx.buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	ph->xops->xfer_put(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
/*
 * Request a new rate (in Hz) for clock @clk_id.
 *
 * If the platform advertises asynchronous support and the number of
 * in-flight async operations is below the platform limit, the request
 * is issued asynchronously and completion is awaited via the delayed
 * response path.
 */
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/* Go async only while below the platform's advertised limit */
	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = ph->xops->do_xfer_with_response(ph, t);
	else
		ret = ph->xops->do_xfer(ph, t);

	/*
	 * Balance the atomic_inc_return() above: the counter was bumped
	 * even when the async path was not taken.
	 */
	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 		      u32 config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	struct scmi_xfer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	struct scmi_clock_set_config *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 				      sizeof(*cfg), 0, &t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	cfg = t->tx.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	cfg->id = cpu_to_le32(clk_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	cfg->attributes = cpu_to_le32(config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	ret = ph->xops->do_xfer(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	ph->xops->xfer_put(ph, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 
/* Ungate clock @clk_id by setting the CLOCK_ENABLE attribute bit. */
static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
/* Gate clock @clk_id by clearing all config attribute bits. */
static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 
/* Return the number of clock domains cached at protocol init time. */
static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->num_clocks;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) static const struct scmi_clock_info *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	struct clock_info *ci = ph->get_priv(ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	struct scmi_clock_info *clk = ci->clk + clk_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	if (!clk->name[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	return clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 
/* Operations exposed to SCMI clock consumers (e.g. the clk driver). */
static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	u32 version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	int clkid, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	struct clock_info *cinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	ph->xops->version_get(ph, &version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	dev_dbg(ph->dev, "Clock Version %d.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	if (!cinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	scmi_clock_protocol_attributes_get(ph, cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 				  sizeof(*cinfo->clk), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	if (!cinfo->clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 		struct scmi_clock_info *clk = cinfo->clk + clkid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 		ret = scmi_clock_attributes_get(ph, clkid, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 			scmi_clock_describe_rates_get(ph, clkid, clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	cinfo->version = version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	return ph->set_priv(ph, cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 
/* SCMI clock protocol descriptor registered with the SCMI core. */
static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.init_instance = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
};

/* Generate module-level register/unregister hooks for this protocol */
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)