cppc_acpi.c

/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers, which may be memory mapped or hardware registers, and may
 * also include some static integer values.
 *
 * CPU performance is expressed on an abstract, continuous scale, as
 * opposed to a discretized P-state scale which is tied to CPU frequency
 * only. In brief, the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC specs are available in the ACPI
 * v5.1 and above specifications.
 */

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include <acpi/cppc_acpi.h>

/*
 * Lock to provide mutually exclusive access to the PCC channel, e.g. when
 * the remote updates the shared region with new data, the reader needs to
 * be protected from other CPUs' activity on the same channel.
 */
static DEFINE_SPINLOCK(pcc_lock);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per-CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* This layer handles all the PCC specifics for CPPC. */
static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
static bool pcc_channel_acquired;
static ktime_t deadline;

/*
 * Arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands. Keep it high enough to cover emulators, where
 * the processors run painfully slowly.
 */
#define NUM_RETRIES 500

static int check_pcc_chan(void)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(), deadline);

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			ret = 0;
			break;
		}
		/*
		 * Reduce the bus traffic in case this loop takes longer
		 * than a few retries.
		 */
		udelay(3);
	}

	return ret;
}

static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		ret = check_pcc_chan();
		if (ret)
			return ret;
	}

	/* Write to the shared comm region. */
	writew(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew(0, &generic_comm_base->status);

	/* Ring doorbell */
	ret = mbox_send_message(pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
		       cmd, ret);
		return ret;
	}

	/*
	 * For READs we need to ensure the cmd completed so the
	 * ensuing read()s can proceed. For WRITEs we don't care
	 * because the actual write()s are done before coming here
	 * and the next READ or WRITE will check if the channel
	 * is busy/free at the entry of this call.
	 */
	if (cmd == CMD_READ)
		ret = check_pcc_chan();

	mbox_client_txdone(pcc_channel, ret);

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}
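
/*
 * Note: knows_txdone tells the mailbox framework that this client signals
 * TX completion itself, which send_pcc_cmd() does via mbox_client_txdone()
 * once it has polled the CMD COMPLETE bit.
 */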
struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};
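
/*
 * Parse a CPU's _PSD (P-State Dependency) object. Per the ACPI spec it
 * evaluates to a package of five integers, matching the "NNNNN" format
 * string used below: NumEntries, Revision, Domain, CoordType and
 * NumProcessors.
 */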
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU-specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr)
			continue;

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
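
/*
 * A minimal usage sketch (hypothetical caller; the all_cpu_data array is
 * assumed to be populated by a CPUfreq driver, it is not defined here):
 *
 *	ret = acpi_get_psd_map(all_cpu_data);
 *	if (ret)
 *		pr_debug("Error parsing PSD data\n");
 */
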
static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	unsigned int len;
	u64 usecs_lat;

	if (pcc_subspace_idx >= 0) {
		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
						       pcc_subspace_idx);

		if (IS_ERR(pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = pcc_channel->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * This is the shared communication region
		 * for the OS and Platform to communicate over.
		 */
		comm_base_addr = cppc_ss->base_address;
		len = cppc_ss->length;

		/*
		 * cppc_ss->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of nominal;
		 * e.g. a nominal latency of 40 us gives a deadline of
		 * NUM_RETRIES * 40 us = 20 ms per command.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);

		pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
		if (!pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_channel_acquired = true;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *	{
 *	17,							// NumEntries
 *	1,							// Revision
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *	ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *	..
 *	..
 *	..
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * For example, a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9,	// AccessSize (subspace ID)
 *		0
 *	)
 */
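
/*
 * Note: for PCC entries the AccessSize field above doubles as the PCC
 * subspace ID. acpi_cppc_processor_probe() below relies on this when it
 * extracts the subspace index from gas_t->access_width.
 */
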
/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
			 num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
			 cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_idx < 0)
					pcc_subspace_idx = gas_t->access_width;
				else if (pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				/* Support only PCC and SYS MEM type regs */
				pr_debug("Unsupported register type: %d\n", gas_t->space_id);
				goto out_free;
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all CPUs. */
	if (!pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_idx);
		if (ret)
			goto out_free;
	}

	/*
	 * Plug it into this CPU's CPC descriptor only after everything has
	 * checked out, so the error path below doesn't leave a stale
	 * per-CPU pointer behind.
	 */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	kfree(output.pointer);
	return 0;

out_free:
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
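
/*
 * A minimal sketch of the expected probe flow (hypothetical caller; in
 * mainline, the ACPI processor driver owns the per-CPU 'processors'
 * pointers):
 *
 *	struct acpi_processor *pr = per_cpu(processors, cpu);
 *
 *	if (pr && acpi_cppc_processor_probe(pr))
 *		pr_debug("CPPC probe failed for CPU:%d\n", cpu);
 */
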
/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	kfree(cpc_ptr);
	/* Don't leave a dangling per-CPU pointer behind. */
	per_cpu(cpc_desc_ptr, pr->id) = NULL;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

static u64 get_phys_addr(struct cpc_reg *reg)
{
	/*
	 * PCC register addresses are relative to the start of the subspace,
	 * and the PCC communication space proper begins at byte offset 0x8,
	 * past the shared memory region header (signature, command, status).
	 */
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		return (u64)comm_base_addr + 0x8 + reg->address;
	else
		return reg->address;
}

static void cpc_read(struct cpc_reg *reg, u64 *val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_read_memory((acpi_physical_address)addr,
			    val, reg->bit_width);
}

static void cpc_write(struct cpc_reg *reg, u64 val)
{
	u64 addr = get_phys_addr(reg);

	acpi_os_write_memory((acpi_physical_address)addr,
			     val, reg->bit_width);
}
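
/*
 * Both helpers above assume a memory-backed register (SystemMemory or
 * PCC): acpi_cppc_processor_probe() rejects every other space_id, so no
 * other register type can reach these accessors.
 */
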
/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
				     *ref_perf, *nom_perf;
	u64 high, low, ref, nom;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&highest_reg->cpc_entry.reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(&lowest_reg->cpc_entry.reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(&ref_perf->cpc_entry.reg, &ref);
	perf_caps->reference_perf = ref;

	cpc_read(&nom_perf->cpc_entry.reg, &nom);
	perf_caps->nominal_perf = nom;

	/* If Reference Performance is not populated, fall back to Nominal. */
	if (!ref)
		perf_caps->reference_perf = perf_caps->nominal_perf;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
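
/*
 * A minimal usage sketch (hypothetical caller, e.g. a CPUfreq driver's
 * init path; 'cpu' and 'max_perf' are assumptions, not defined here):
 *
 *	struct cppc_perf_caps caps;
 *	int ret;
 *
 *	ret = cppc_get_perf_caps(cpu, &caps);
 *	if (!ret)
 *		max_perf = caps.highest_perf;
 */
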
/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg;
	u64 delivered, reference;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&delivered_reg->cpc_entry.reg, &delivered);
	cpc_read(&reference_reg->cpc_entry.reg, &reference);

	if (!delivered || !reference) {
		ret = -EFAULT;
		goto out_err;
	}

	/* Return deltas since the previous call; stash the raw readings. */
	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;

	perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
	perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;

	perf_fb_ctrs->prev_delivered = delivered;
	perf_fb_ctrs->prev_reference = reference;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
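
/*
 * A sketch of how the returned deltas are typically consumed to estimate
 * delivered performance (the standard CPPC feedback computation; the
 * local variables here are illustrative assumptions):
 *
 *	struct cppc_perf_fb_ctrs fb_ctrs;
 *
 *	if (!cppc_get_perf_ctrs(cpu, &fb_ctrs) && fb_ctrs.reference)
 *		delivered_perf = (reference_perf * fb_ctrs.delivered) /
 *				 fb_ctrs.reference;
 */
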
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	spin_lock(&pcc_lock);

	/* If this is a PCC reg, check if the channel is free before writing */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		ret = check_pcc_chan();
		if (ret)
			goto busy_channel;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf);

	/* Is this a PCC reg? */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		/* Ring doorbell so Remote can get our perf request. */
		if (send_pcc_cmd(CMD_WRITE) < 0)
			ret = -EIO;
	}
busy_channel:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
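
/*
 * A minimal usage sketch (hypothetical CPUfreq ->target() path; mapping a
 * requested frequency to an abstract 'desired_perf' value is driver
 * specific and assumed here):
 *
 *	struct cppc_perf_ctrls ctrls = { .desired_perf = desired_perf };
 *
 *	ret = cppc_set_perf(policy->cpu, &ctrls);
 */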