perf.c

// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>

#include "common.h"

enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
};

struct scmi_opp {
	u32 perf;
	u32 power;
	u32 trans_latency_us;
};

struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
	__le32 stats_addr_low;
	__le32 stats_addr_high;
	__le32 stats_size;
};

struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;
};

struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};

struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;
};

struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
	} opp[0];
};

struct perf_dom_info {
	bool set_limits;
	bool set_perf;
	bool perf_limit_notify;
	bool perf_level_notify;
	u32 opp_count;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	u32 mult_factor;
	char name[SCMI_MAX_STR_SIZE];
	struct scmi_opp opp[MAX_OPPS];
};

struct scmi_perf_info {
	int num_domains;
	bool power_scale_mw;
	u64 stats_addr;
	u32 stats_size;
	struct perf_dom_info *dom_info;
};
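
/*
 * Retrieve the protocol-level attributes: number of performance domains,
 * the power scale in use and the location/size of the optional statistics
 * shared-memory region.
 */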
static int scmi_perf_attributes_get(const struct scmi_handle *handle,
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);
		pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

static int
scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
				struct perf_dom_info *dom_info)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
				 SCMI_PROTOCOL_PERF, sizeof(domain),
				 sizeof(*attr), &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		u32 flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
		dom_info->mult_factor =	(dom_info->sustained_freq_khz * 1000) /
					dom_info->sustained_perf_level;
		memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
	}

	scmi_xfer_put(handle, t);
	return ret;
}
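
/* Comparator for sort(): orders OPPs by ascending performance level */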
static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scmi_opp *t1 = opp1, *t2 = opp2;

	return t1->perf - t2->perf;
}
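
/*
 * Retrieve the OPP list of a domain. PERF_DESCRIBE_LEVELS may return the
 * levels in batches, so the command is reissued with an updated level_index
 * until no entries remain; the collected OPPs are then sorted by perf level.
 */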
static int
scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
			      struct perf_dom_info *perf_dom)
{
	int ret, cnt;
	u32 tot_opp_cnt = 0;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_opp *opp;
	struct scmi_msg_perf_describe_levels *dom_info;
	struct scmi_msg_resp_perf_describe_levels *level_info;

	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
				 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
	if (ret)
		return ret;

	dom_info = t->tx.buf;
	level_info = t->rx.buf;

	do {
		dom_info->domain = cpu_to_le32(domain);
		/* Set the number of OPPs to be skipped/already read */
		dom_info->level_index = cpu_to_le32(tot_opp_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			break;

		num_returned = le16_to_cpu(level_info->num_returned);
		num_remaining = le16_to_cpu(level_info->num_remaining);
		if (tot_opp_cnt + num_returned > MAX_OPPS) {
			dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
			break;
		}

		opp = &perf_dom->opp[tot_opp_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
			opp->power = le32_to_cpu(level_info->opp[cnt].power);
			opp->trans_latency_us = le16_to_cpu
				(level_info->opp[cnt].transition_latency_us);

			dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
				opp->perf, opp->power, opp->trans_latency_us);
		}

		tot_opp_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	perf_dom->opp_count = tot_opp_cnt;
	scmi_xfer_put(handle, t);

	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
	return ret;
}
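
/* Restrict a domain's allowed performance range to [min_perf, max_perf] */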
static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
				u32 max_perf, u32 min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_limits *limits;

	ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
				 sizeof(*limits), 0, &t);
	if (ret)
		return ret;

	limits = t->tx.buf;
	limits->domain = cpu_to_le32(domain);
	limits->max_level = cpu_to_le32(max_perf);
	limits->min_level = cpu_to_le32(min_perf);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
				u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_get_limits *limits;

	ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
				 sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(domain);

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		limits = t->rx.buf;

		*max_perf = le32_to_cpu(limits->max_level);
		*min_perf = le32_to_cpu(limits->min_level);
	}

	scmi_xfer_put(handle, t);
	return ret;
}
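
/*
 * Request a new performance level for a domain; @poll selects polled
 * rather than interrupt-driven completion of the transfer.
 */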
static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
			       u32 level, bool poll)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_level *lvl;

	ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
				 sizeof(*lvl), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	lvl = t->tx.buf;
	lvl->domain = cpu_to_le32(domain);
	lvl->level = cpu_to_le32(level);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
			       u32 *level, bool poll)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
				 sizeof(u32), sizeof(u32), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	*(__le32 *)t->tx.buf = cpu_to_le32(domain);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*level = le32_to_cpu(*(__le32 *)t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}

/* Device specific ops */
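
/*
 * The performance domain of a device is taken from the first cell of its
 * "clocks" phandle arguments in the device tree.
 */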
static int scmi_dev_domain_id(struct device *dev)
{
	struct of_phandle_args clkspec;

	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
				       0, &clkspec))
		return -EINVAL;

	return clkspec.args[0];
}
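
/*
 * Register every OPP of the device's domain with the OPP library, converting
 * perf levels to frequencies via mult_factor; already-added OPPs are removed
 * again if a later addition fails.
 */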
static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
				     struct device *dev)
{
	int idx, ret, domain;
	unsigned long freq;
	struct scmi_opp *opp;
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;

	domain = scmi_dev_domain_id(dev);
	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;
	if (!dom)
		return -EIO;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		freq = opp->perf * dom->mult_factor;

		ret = dev_pm_opp_add(dev, freq, 0);
		if (ret) {
			dev_warn(dev, "failed to add opp %luHz\n", freq);

			while (idx-- > 0) {
				freq = (--opp)->perf * dom->mult_factor;
				dev_pm_opp_remove(dev, freq);
			}
			return ret;
		}
	}
	return 0;
}
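
/*
 * Report the transition latency of the highest OPP (the list is sorted by
 * perf level), converted from microseconds to nanoseconds.
 */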
static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
					    struct device *dev)
{
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;
	int domain = scmi_dev_domain_id(dev);

	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;
	if (!dom)
		return -EIO;

	/* uS to nS */
	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}
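
/*
 * Frequency-based wrappers: frequencies are converted to and from abstract
 * performance levels using the domain's mult_factor.
 */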
static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
			      unsigned long freq, bool poll)
{
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
				   poll);
}

static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
			      unsigned long *freq, bool poll)
{
	int ret;
	u32 level;
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	ret = scmi_perf_level_get(handle, domain, &level, poll);
	if (!ret)
		*freq = level * dom->mult_factor;

	return ret;
}

static struct scmi_perf_ops perf_ops = {
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.device_domain_id = scmi_dev_domain_id,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
};
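
/*
 * Protocol initialisation: query the protocol attributes, allocate and
 * populate the per-domain information (attributes and OPP lists), then
 * install the performance ops on the SCMI handle.
 */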
static int scmi_perf_protocol_init(struct scmi_handle *handle)
{
	int domain;
	u32 version;
	struct scmi_perf_info *pinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);

	dev_dbg(handle->dev, "Performance Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	scmi_perf_attributes_get(handle, pinfo);

	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

		scmi_perf_domain_attributes_get(handle, domain, dom);
		scmi_perf_describe_levels_get(handle, domain, dom);
	}

	handle->perf_ops = &perf_ops;
	handle->perf_priv = pinfo;

	return 0;
}

static int __init scmi_perf_init(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_PERF,
				      &scmi_perf_protocol_init);
}
subsys_initcall(scmi_perf_init);