/**
 * @file nmi_timer_int.c
 *
 * @remark Copyright 2011 Advanced Micro Devices, Inc.
 *
 * @author Robert Richter <robert.richter@amd.com>
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/oprofile.h>
#include <linux/perf_event.h>

#ifdef CONFIG_OPROFILE_NMI_TIMER
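
/*
 * One hardware perf counter per CPU serves as the NMI timer source;
 * ctr_running tracks whether profiling is currently active.
 */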
static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
static int ctr_running;

static struct perf_event_attr nmi_timer_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};
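
/*
 * Overflow handler: reset the interrupt count so perf does not throttle
 * the event, then hand the interrupted registers to oprofile.
 */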
static void nmi_timer_callback(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs)
{
        event->hw.interrupts = 0;       /* don't throttle interrupts */
        oprofile_add_sample(regs, 0);
}
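
/*
 * Create the per-CPU counter on first use and enable it when profiling
 * is running.
 */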
static int nmi_timer_start_cpu(int cpu)
{
        struct perf_event *event = per_cpu(nmi_timer_events, cpu);

        if (!event) {
                event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
                                                         nmi_timer_callback, NULL);
                if (IS_ERR(event))
                        return PTR_ERR(event);
                per_cpu(nmi_timer_events, cpu) = event;
        }

        if (event && ctr_running)
                perf_event_enable(event);

        return 0;
}
static void nmi_timer_stop_cpu(int cpu)
{
        struct perf_event *event = per_cpu(nmi_timer_events, cpu);

        if (event && ctr_running)
                perf_event_disable(event);
}
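
/* CPU hotplug callbacks: start/stop the per-CPU timer as CPUs come and go. */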
static int nmi_timer_cpu_online(unsigned int cpu)
{
        nmi_timer_start_cpu(cpu);
        return 0;
}

static int nmi_timer_cpu_predown(unsigned int cpu)
{
        nmi_timer_stop_cpu(cpu);
        return 0;
}
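
/*
 * Start/stop profiling on all online CPUs; get_online_cpus() holds off
 * CPU hotplug while the online mask is walked.
 */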
static int nmi_timer_start(void)
{
        int cpu;

        get_online_cpus();
        ctr_running = 1;
        for_each_online_cpu(cpu)
                nmi_timer_start_cpu(cpu);
        put_online_cpus();

        return 0;
}
static void nmi_timer_stop(void)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                nmi_timer_stop_cpu(cpu);
        ctr_running = 0;
        put_online_cpus();
}

static enum cpuhp_state hp_online;
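
/* Unregister the hotplug state and release every per-CPU event. */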
static void nmi_timer_shutdown(void)
{
        struct perf_event *event;
        int cpu;

        cpuhp_remove_state(hp_online);
        for_each_possible_cpu(cpu) {
                event = per_cpu(nmi_timer_events, cpu);
                if (!event)
                        continue;
                perf_event_disable(event);
                per_cpu(nmi_timer_events, cpu) = NULL;
                perf_event_release_kernel(event);
        }
}
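
/*
 * Program the sample period to one timer tick's worth of CPU cycles
 * (cpu_khz * 1000 / HZ) and register the hotplug callbacks, which also
 * creates the counters on all currently online CPUs; they are only
 * enabled once profiling starts.
 */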
static int nmi_timer_setup(void)
{
        int err;
        u64 period;

        /* clock cycles per tick: */
        period = (u64)cpu_khz * 1000;
        do_div(period, HZ);
        nmi_timer_attr.sample_period = period;

        err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "oprofile/nmi:online",
                                nmi_timer_cpu_online, nmi_timer_cpu_predown);
        if (err < 0) {
                nmi_timer_shutdown();
                return err;
        }
        hp_online = err;
        return 0;
}
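
/*
 * Probe the timer once at init time (the immediate shutdown below only
 * verifies that setup works), then hand the callbacks to the oprofile core.
 */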
int __init op_nmi_timer_init(struct oprofile_operations *ops)
{
        int err = 0;

        err = nmi_timer_setup();
        if (err)
                return err;
        nmi_timer_shutdown();           /* only check, don't alloc */

        ops->create_files       = NULL;
        ops->setup              = nmi_timer_setup;
        ops->shutdown           = nmi_timer_shutdown;
        ops->start              = nmi_timer_start;
        ops->stop               = nmi_timer_stop;
        ops->cpu_type           = "timer";
        printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");

        return 0;
}

#endif