/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/ftrace.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>
#include <asm/asi.h>
#include <asm/spitfire.h>

/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Therefore in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}

void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
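
/* Illustrative sketch only, not part of the original file: a PIL-15
 * client such as the perf_event overflow handler cannot do wakeups
 * directly, so it queues an irq_work entry.  irq_work_queue() ends up
 * in arch_irq_work_raise() above, and the queued function later runs
 * from deferred_pcr_work_irq() in normal IRQ context.  The names
 * pcr_defer_example_work/pcr_defer_example_func are made up for this
 * example:
 *
 *	static void pcr_defer_example_func(struct irq_work *work)
 *	{
 *		... wakeups and other lock-safe work go here ...
 *	}
 *	static struct irq_work pcr_defer_example_work;
 *
 *	init_irq_work(&pcr_defer_example_work, pcr_defer_example_func);
 *	...
 *	irq_work_queue(&pcr_defer_example_work);    <-- from PIL 15 context
 */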

const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);

static u64 direct_pcr_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
	return val;
}

static void direct_pcr_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
}

static u64 direct_pic_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pic, %0" : "=r" (val));
	return val;
}

static void direct_pic_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);

	/* Blackbird errata workaround.  See commentary in
	 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
	 * for more information.
	 */
	__asm__ __volatile__("ba,pt %%xcc, 99f\n\t"
			     " nop\n\t"
			     ".align 64\n"
			     "99:wr %0, 0x0, %%pic\n\t"
			     "rd %%pic, %%g0" : : "r" (val));
}
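
/* Value written to %pic before enabling the NMI watchdog: the upper
 * 32-bit counter is pre-loaded with -(clock_tick / nmi_hz) so that it
 * wraps, and raises the counter overflow interrupt, roughly nmi_hz
 * times per second.
 */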
static u64 direct_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / nmi_hz;

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops direct_pcr_ops = {
	.read_pcr = direct_pcr_read,
	.write_pcr = direct_pcr_write,
	.read_pic = direct_pic_read,
	.write_pic = direct_pic_write,
	.nmi_picl_value = direct_picl_value,
	.pcr_nmi_enable = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE),
	.pcr_nmi_disable = PCR_PIC_PRIV,
};
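
/* When hypervisor tracing (PCR_N2_HTRACE) is requested on Niagara-2
 * style chips, %pcr is set through the HV_N2_PERF_SPARC_CTL hypervisor
 * service; otherwise, or if that call fails, a plain %pcr write is used.
 */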
static void n2_pcr_write(unsigned long reg_num, u64 val)
{
	unsigned long ret;

	WARN_ON_ONCE(reg_num != 0);
	if (val & PCR_N2_HTRACE) {
		ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
		if (ret != HV_EOK)
			direct_pcr_write(reg_num, val);
	} else
		direct_pcr_write(reg_num, val);
}

static u64 n2_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops n2_pcr_ops = {
	.read_pcr = direct_pcr_read,
	.write_pcr = n2_pcr_write,
	.read_pic = direct_pic_read,
	.write_pic = direct_pic_write,
	.nmi_picl_value = n2_picl_value,
	.pcr_nmi_enable = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
			   PCR_N2_TOE_OV1 |
			   (2 << PCR_N2_SL1_SHIFT) |
			   (0xff << PCR_N2_MASK1_SHIFT)),
	.pcr_nmi_disable = PCR_PIC_PRIV,
};
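
/* SPARC-T4 (VT): the performance control registers are reached through
 * the VT-cpu hypervisor API (sun4v_vt_get_perfreg()/sun4v_vt_set_perfreg()),
 * while the counters themselves are read and written directly via
 * ASI_PIC, one 8-byte register per counter.
 */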
static u64 n4_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_vt_get_perfreg(reg_num, &val);

	return val;
}

static void n4_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_vt_set_perfreg(reg_num, val);
}

static u64 n4_pic_read(unsigned long reg_num)
{
	unsigned long val;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (val)
			     : "r" (reg_num * 0x8UL), "i" (ASI_PIC));

	return val;
}

static void n4_pic_write(unsigned long reg_num, u64 val)
{
	__asm__ __volatile__("stxa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC));
}
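
/* Unlike the packed two-counter %pic layout handled by direct_picl_value(),
 * each T4 PIC is programmed on its own, so the NMI pre-load value sits in
 * the low 32 bits with no shift.
 */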
static u64 n4_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff));
}

static const struct pcr_ops n4_pcr_ops = {
	.read_pcr = n4_pcr_read,
	.write_pcr = n4_pcr_write,
	.read_pic = n4_pic_read,
	.write_pic = n4_pic_write,
	.nmi_picl_value = n4_picl_value,
	.pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
			   PCR_N4_UTRACE | PCR_N4_TOE |
			   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable = PCR_N4_PICNPT,
};
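
/* SPARC-T5 differs from T4 only in the hypervisor calls used to access
 * the PCRs; PIC access and the NMI settings are shared with the T4 code.
 */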
static u64 n5_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_t5_get_perfreg(reg_num, &val);

	return val;
}

static void n5_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_t5_set_perfreg(reg_num, val);
}

static const struct pcr_ops n5_pcr_ops = {
	.read_pcr = n5_pcr_read,
	.write_pcr = n5_pcr_write,
	.read_pic = n4_pic_read,
	.write_pic = n4_pic_write,
	.nmi_picl_value = n4_picl_value,
	.pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
			   PCR_N4_UTRACE | PCR_N4_TOE |
			   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable = PCR_N4_PICNPT,
};

static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
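
/* Negotiate the chip-specific performance counter hypervisor API group
 * (version 1.0) before any PCR/PIC access is attempted on sun4v systems.
 */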
static int __init register_perf_hsvc(void)
{
	unsigned long hverror;

	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		case SUN4V_CHIP_NIAGARA3:
			perf_hsvc_group = HV_GRP_KT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA4:
			perf_hsvc_group = HV_GRP_VT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA5:
			perf_hsvc_group = HV_GRP_T5_CPU;
			break;

		default:
			return -ENODEV;
		}

		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		hverror = sun4v_hvapi_register(perf_hsvc_group,
					       perf_hsvc_major,
					       &perf_hsvc_minor);
		if (hverror) {
			pr_err("perfmon: Could not register hvapi(0x%lx).\n",
			       hverror);
			return -ENODEV;
		}
	}

	return 0;
}

static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}
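
/* Niagara-1 through Niagara-3 all use the N2 pcr_ops; T4 and T5 each
 * get their own.
 */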
static int __init setup_sun4v_pcr_ops(void)
{
	int ret = 0;

	switch (sun4v_chip_type) {
	case SUN4V_CHIP_NIAGARA1:
	case SUN4V_CHIP_NIAGARA2:
	case SUN4V_CHIP_NIAGARA3:
		pcr_ops = &n2_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA4:
		pcr_ops = &n4_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA5:
		pcr_ops = &n5_pcr_ops;
		break;

	default:
		ret = -ENODEV;
		break;
	}

	return ret;
}

int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		err = setup_sun4v_pcr_ops();
		if (err)
			goto out_unregister;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}
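
/* Rough sketch of how a client such as the NMI watchdog in
 * arch/sparc/kernel/nmi.c is expected to drive these ops (illustrative
 * only; error handling and per-cpu state omitted):
 *
 *	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
 *	pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
 *	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
 */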