/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16
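
/*
 * Each uncore unit exposes interleaved control/counter MSR pairs starting
 * at msr_base: even offsets are PERF_CTL registers, odd offsets PERF_CTR.
 * The counters are also readable via RDPMC at fixed index offsets: 6-9 for
 * the NB/DF counters, 10-15 for the L2/L3 counters. The counters are
 * 48 bits wide, hence COUNTER_SHIFT = 64 - 48 = 16 for sign-extending
 * deltas in amd_uncore_read().
 */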

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);
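
/*
 * One amd_uncore instance is shared by every CPU behind the same
 * northbridge/DF or LLC: the first CPU in the domain allocates it, later
 * siblings bump @refcnt and point their per-CPU slot at the same object.
 * @cpu names the single CPU in the domain that services events (and sits
 * in @active_mask); @events[] slots are claimed locklessly with cmpxchg().
 */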
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
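
	/*
	 * The shift pair below discards bits 63:48 and sign-extends bit 47
	 * of the 48-bit hardware delta, so the result stays correct even
	 * when the raw counter has wrapped since the last read.
	 */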
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}
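
/*
 * perf core flag protocol: PERF_EF_RELOAD asks ->start() to rewrite the
 * saved prev_count into the hardware counter before enabling it, and
 * PERF_EF_UPDATE asks ->stop() to fold the final hardware value into
 * event->count. PERF_HES_STOPPED/PERF_HES_UPTODATE track that state.
 */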
static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;
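
	/* CTL/CTR MSRs are interleaved: even offset selects, odd offset counts */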
	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events in
	 * Family 17h. For other events, the two fields do not affect the count.
	 */
	if (l3_mask)
		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}
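
/*
 * Illustrative usage from user space (the event/umask encoding below is a
 * placeholder, not a documented event ID -- see the family's BKDG/PPR):
 *
 *   perf stat -a -e amd_df/event=0x07,umask=0x00/ -- sleep 1
 *
 * The perf tool reads the PMU's "cpumask" attribute (defined below) and
 * opens the event only on each domain's designated CPU; event_init above
 * remaps event->cpu for callers that pick another CPU in the same domain.
 */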

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				     \
static ssize_t								     \
_dev##_show##_name(struct device *dev,					     \
		   struct device_attribute *attr,			     \
		   char *page)						     \
{									     \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			     \
	return sprintf(page, _format "\n");				     \
}									     \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						     \
static struct attribute *amd_uncore_format_attr_##_name[] = {		     \
	&format_attr_event_##_name.attr,				     \
	&format_attr_umask.attr,					     \
	NULL,								     \
};									     \
static struct attribute_group amd_uncore_format_group_##_name = {	     \
	.name = "format",						     \
	.attrs = amd_uncore_format_attr_##_name,			     \
};									     \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {   \
	&amd_uncore_attr_group,						     \
	&amd_uncore_format_group_##_name,				     \
	NULL,								     \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);
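
/*
 * The format strings above end up in sysfs, e.g.
 * /sys/bus/event_source/devices/amd_df/format/event containing
 * "config:0-7,32-35,59-60", which tells the perf tool how to pack
 * "event=..." and "umask=..." into perf_event_attr.config.
 */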

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}
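
/*
 * CPU hotplug lifecycle, wired up in amd_uncore_init() below:
 *   prepare (amd_uncore_cpu_up_prepare)  - allocate per-CPU structures
 *   starting (amd_uncore_cpu_starting)   - read IDs, merge with siblings
 *   online (amd_uncore_cpu_online)       - reap duplicates, set active CPU
 *   down_prepare/dead                    - migrate events, drop refcounts
 */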

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}
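
/*
 * If another online CPU already owns an amd_uncore with the same id, adopt
 * it (bumping its refcnt) and park the freshly allocated duplicate on
 * uncore_unused_list. This runs from the STARTING hotplug step on the
 * incoming CPU with interrupts disabled, so freeing the duplicate is
 * deferred to uncore_clean_online() in the ONLINE step.
 */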
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
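		/* CPUID Fn8000_001E: ECX[7:0] is this core's node (die) id */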
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);

	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}
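
/*
 * Probe: the driver requires TOPOEXT for the id lookups above. On family
 * 17h/18h the NB counters are exposed as "amd_df" and the LLC counters as
 * "amd_l3"; on older families they keep the "amd_nb"/"amd_l2" names and
 * the narrower event format.
 */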
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		/*
		 * For F17h or F18h, the Northbridge counters are
		 * repurposed as Data Fabric counters. Also, L3
		 * counters are supported too. The PMUs are exported
		 * based on family as either L2 or L3 and NB or DF.
		 */
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L3;
		amd_nb_pmu.name		  = "amd_df";
		amd_llc_pmu.name	  = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		l3_mask			  = true;
	} else {
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L2;
		amd_nb_pmu.name		  = "amd_nb";
		amd_llc_pmu.name	  = "amd_l2";
		format_attr_event_df	  = format_attr_event;
		format_attr_event_l3	  = format_attr_event;
		l3_mask			  = false;
	}

	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups	= amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("%s NB counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("%s LLC counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;

	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);