#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>

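/*
 * Map a generic cache event onto the PMU-specific encoding. The config
 * value packs the cache type, operation and result into its low three
 * bytes; combinations marked CACHE_OP_UNSUPPORTED in the map are rejected
 * with -ENOENT.
 */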
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

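/*
 * Translate an event's attributes into the hardware event number used to
 * program the PMU. Events opened against this PMU's dynamic type are
 * treated as raw encodings; the standard hardware, cache and raw perf
 * types are translated via the tables supplied by the CPU-specific backend.
 */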
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

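/*
 * Program the counter for the next sample period. The counter is loaded
 * with the negated period so that it overflows (raising the PMU interrupt)
 * after the requested number of events. Returns 1 if a new period was
 * started.
 */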
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

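/*
 * Fold the current hardware counter value into event->count and
 * period_left. The cmpxchg loop guards against a racing update of
 * prev_count (e.g. from the overflow interrupt) between reading it and
 * reading the counter.
 */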
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

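/*
 * pmu::add callback: claim a hardware counter index for the event and
 * install it in this CPU's event table. The event is left stopped unless
 * PERF_EF_START is set, in which case it is started with a fresh period.
 */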
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

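/*
 * Return nonzero if @event could be scheduled alongside the events already
 * accounted in @hw_events. Software events always validate, as do events
 * below the OFF state and OFF events with no enable_on_exec pending, since
 * none of these will occupy a hardware counter.
 */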
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	if (is_software_event(event))
		return 1;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

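/*
 * Hardware reservation: the PMU interrupt is requested (and a runtime PM
 * reference taken) when the first event is initialised, and released again
 * once the last event has been destroyed.
 */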
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

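/*
 * Drop one reference from the active event count; when the last event goes
 * away, release the interrupt and the runtime PM reference.
 */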
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

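/*
 * pmu::event_init callback: reject events this PMU cannot count, reserve
 * the hardware when the first event is created, then perform the
 * architecture-specific setup and group validation.
 */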
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}

#ifdef CONFIG_PM
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}