// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include "common.h"

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};
struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};
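/*
 * Response to CLOCK_DESCRIBE_RATES: num_rates_flags packs the number of
 * rates returned in this reply, a discrete-vs-range flag and the number of
 * rates still remaining; each rate is a 64-bit value split into two
 * little-endian 32-bit words.
 */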
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[0];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};
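/*
 * Request payload for CLOCK_RATE_SET: flags select asynchronous completion
 * and the rounding policy, and the requested 64-bit rate is carried as two
 * little-endian 32-bit words.
 */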
struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_DELAYED	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};
struct clock_info {
	int num_clocks;
	int max_async_req;
	struct scmi_clock_info *clk;
};
static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
					      struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	scmi_xfer_put(handle, t);
	return ret;
}
static int scmi_clock_attributes_get(const struct scmi_handle *handle,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
				 sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	scmi_xfer_put(handle, t);
	return ret;
}
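/*
 * Fetch the rates supported by a clock. Discrete rates are read in batches,
 * resuming at rate_index until the platform reports none remaining; a ranged
 * clock instead returns a single min/max/step triplet.
 */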
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete)
		clk->list.num_rates = tot_rate_cnt;

err:
	scmi_xfer_put(handle, t);
	return ret;
}
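/* Read the current rate of a clock, reassembled from two 32-bit words */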
static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
				 sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		__le32 *pval = t->rx.buf;

		*value = le32_to_cpu(*pval);
		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
	}

	scmi_xfer_put(handle, t);
	return ret;
}
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
			       u32 config, u64 rate)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(config);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}
static int
scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}
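/* Enable/disable are thin wrappers toggling CLOCK_ENABLE via CLOCK_CONFIG_SET */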
static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, 0);
}
static int scmi_clock_count_get(const struct scmi_handle *handle)
{
	struct clock_info *ci = handle->clk_priv;

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
{
	struct clock_info *ci = handle->clk_priv;
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}
static struct scmi_clk_ops clk_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};
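/*
 * Protocol initialisation: query the protocol version and attributes,
 * allocate one scmi_clock_info per clock, populate each clock's name and
 * supported rates, then install the clock operations on the handle.
 */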
static int scmi_clock_protocol_init(struct scmi_handle *handle)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);

	dev_dbg(handle->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	scmi_clock_protocol_attributes_get(handle, cinfo);

	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(handle, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(handle, clkid, clk);
	}

	handle->clk_ops = &clk_ops;
	handle->clk_priv = cinfo;

	return 0;
}
static int __init scmi_clock_init(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
				      &scmi_clock_protocol_init);
}
subsys_initcall(scmi_clock_init);