/* op_model_mipsxx.c */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2004, 05, 06 by Ralf Baechle
  7. * Copyright (C) 2005 by MIPS Technologies, Inc.
  8. */
  9. #include <linux/cpumask.h>
  10. #include <linux/oprofile.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/smp.h>
  13. #include <asm/irq_regs.h>
  14. #include <asm/time.h>
  15. #include "op_impl.h"
/* Build the event-select field of a performance control register. */
#define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
				MIPS_PERFCTRL_EVENT)
/* Target-VPE field used when binding a counter to a single VPE. */
#define M_PERFCTL_VPEID(vpe) ((vpe) << MIPS_PERFCTRL_VPEID_S)
/* Counters raise the overflow interrupt when bit 31 becomes set. */
#define M_COUNTER_OVERFLOW (1UL << 31)

/* Previous perf_irq handler; saved in mipsxx_init(), restored in mipsxx_exit(). */
static int (*save_perf_irq)(void);
/* IRQ line for counter overflow, or -1 when none could be determined. */
static int perfcount_irq;

/*
 * XLR has only one set of counters per core. Designate the
 * first hardware thread in the core for setup and init.
 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c) 0
#endif
#ifdef CONFIG_MIPS_MT_SMP
/* Nonzero when each TC has its own counters; probed from Config7 in mipsxx_init(). */
static int cpu_has_mipsmt_pertccounters;

/* Control bits that bind a counter to the VPE we are running on. */
#define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \
	      M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
/* VPE-relative counter index; always 0 with per-TC counters. */
#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
		  0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE. There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcode a few things here for the moment. The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;
	return 0;
}
#else
#define WHAT 0
#define vpe_id() 0
static inline unsigned int vpe_shift(void)
{
	return 0;
}
#endif
  60. static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
  61. {
  62. return counters >> vpe_shift();
  63. }
  64. static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
  65. {
  66. return counters << vpe_shift();
  67. }
/*
 * Generate VPE-aware accessors r_c0_<r><n>() / w_c0_<r><n>() for the
 * perfcntr/perfctrl coprocessor 0 registers.  On VPE 0 logical counter n
 * maps to physical register n; on VPE 1 it maps to the sibling register
 * np (the instantiations below pair 0<->2 and 1<->3).  Any other
 * vpe_id() value is a bug.
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}									\

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
/* Forward declaration; fully initialized at the bottom of this file. */
struct op_mips_model op_model_mipsxx_ops;

/* Shadow copies of the control words and start values programmed per CPU. */
static struct mipsxx_register_config {
	unsigned int control[4];
	unsigned int counter[4];
} reg;
  114. /* Compute all of the registers in preparation for enabling profiling. */
  115. static void mipsxx_reg_setup(struct op_counter_config *ctr)
  116. {
  117. unsigned int counters = op_model_mipsxx_ops.num_counters;
  118. int i;
  119. /* Compute the performance counter control word. */
  120. for (i = 0; i < counters; i++) {
  121. reg.control[i] = 0;
  122. reg.counter[i] = 0;
  123. if (!ctr[i].enabled)
  124. continue;
  125. reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
  126. MIPS_PERFCTRL_IE;
  127. if (ctr[i].kernel)
  128. reg.control[i] |= MIPS_PERFCTRL_K;
  129. if (ctr[i].user)
  130. reg.control[i] |= MIPS_PERFCTRL_U;
  131. if (ctr[i].exl)
  132. reg.control[i] |= MIPS_PERFCTRL_EXL;
  133. if (boot_cpu_type() == CPU_XLR)
  134. reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
  135. reg.counter[i] = 0x80000000 - ctr[i].count;
  136. }
  137. }
  138. /* Program all of the registers in preparation for enabling profiling. */
  139. static void mipsxx_cpu_setup(void *args)
  140. {
  141. unsigned int counters = op_model_mipsxx_ops.num_counters;
  142. if (oprofile_skip_cpu(smp_processor_id()))
  143. return;
  144. switch (counters) {
  145. case 4:
  146. w_c0_perfctrl3(0);
  147. w_c0_perfcntr3(reg.counter[3]);
  148. case 3:
  149. w_c0_perfctrl2(0);
  150. w_c0_perfcntr2(reg.counter[2]);
  151. case 2:
  152. w_c0_perfctrl1(0);
  153. w_c0_perfcntr1(reg.counter[1]);
  154. case 1:
  155. w_c0_perfctrl0(0);
  156. w_c0_perfcntr0(reg.counter[0]);
  157. }
  158. }
  159. /* Start all counters on current CPU */
  160. static void mipsxx_cpu_start(void *args)
  161. {
  162. unsigned int counters = op_model_mipsxx_ops.num_counters;
  163. if (oprofile_skip_cpu(smp_processor_id()))
  164. return;
  165. switch (counters) {
  166. case 4:
  167. w_c0_perfctrl3(WHAT | reg.control[3]);
  168. case 3:
  169. w_c0_perfctrl2(WHAT | reg.control[2]);
  170. case 2:
  171. w_c0_perfctrl1(WHAT | reg.control[1]);
  172. case 1:
  173. w_c0_perfctrl0(WHAT | reg.control[0]);
  174. }
  175. }
  176. /* Stop all counters on current CPU */
  177. static void mipsxx_cpu_stop(void *args)
  178. {
  179. unsigned int counters = op_model_mipsxx_ops.num_counters;
  180. if (oprofile_skip_cpu(smp_processor_id()))
  181. return;
  182. switch (counters) {
  183. case 4:
  184. w_c0_perfctrl3(0);
  185. case 3:
  186. w_c0_perfctrl2(0);
  187. case 2:
  188. w_c0_perfctrl1(0);
  189. case 1:
  190. w_c0_perfctrl0(0);
  191. }
  192. }
/*
 * Counter overflow handler, installed both as perf_irq and called from
 * the mipsxx_perfcount_int() IRQ handler.  For every implemented
 * counter that has interrupts enabled and has overflowed (bit 31 set),
 * record an oprofile sample and re-arm the counter from reg.counter[].
 * Returns IRQ_HANDLED if at least one counter had overflowed, else
 * IRQ_NONE.
 */
static int mipsxx_perfcount_handler(void)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	unsigned int control;
	unsigned int counter;
	int handled = IRQ_NONE;

	/* On R2 cores Cause.PCI cheaply tells us whether any counter
	   raised the interrupt at all. */
	if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
		return handled;

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		control = r_c0_perfctrl ## n();				\
		counter = r_c0_perfcntr ## n();				\
		if ((control & MIPS_PERFCTRL_IE) &&			\
		    (counter & M_COUNTER_OVERFLOW)) {			\
			oprofile_add_sample(get_irq_regs(), n);		\
			w_c0_perfcntr ## n(reg.counter[n]);		\
			handled = IRQ_HANDLED;				\
		}
	/* Deliberate fallthrough: with N counters present, all of
	   counters N-1 .. 0 must be checked. */
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	return handled;
}
  219. static inline int __n_counters(void)
  220. {
  221. if (!cpu_has_perf)
  222. return 0;
  223. if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
  224. return 1;
  225. if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
  226. return 2;
  227. if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
  228. return 3;
  229. return 4;
  230. }
  231. static inline int n_counters(void)
  232. {
  233. int counters;
  234. switch (current_cpu_type()) {
  235. case CPU_R10000:
  236. counters = 2;
  237. break;
  238. case CPU_R12000:
  239. case CPU_R14000:
  240. case CPU_R16000:
  241. counters = 4;
  242. break;
  243. default:
  244. counters = __n_counters();
  245. }
  246. return counters;
  247. }
  248. static void reset_counters(void *arg)
  249. {
  250. int counters = (int)(long)arg;
  251. switch (counters) {
  252. case 4:
  253. w_c0_perfctrl3(0);
  254. w_c0_perfcntr3(0);
  255. case 3:
  256. w_c0_perfctrl2(0);
  257. w_c0_perfcntr2(0);
  258. case 2:
  259. w_c0_perfctrl1(0);
  260. w_c0_perfcntr1(0);
  261. case 1:
  262. w_c0_perfctrl0(0);
  263. w_c0_perfcntr0(0);
  264. }
  265. }
/*
 * IRQ wrapper around mipsxx_perfcount_handler(), used when counter
 * overflow is delivered on a regular interrupt line (see request_irq()
 * in mipsxx_init()).  irq and dev_id are unused.
 */
static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
{
	return mipsxx_perfcount_handler();
}
  270. static int __init mipsxx_init(void)
  271. {
  272. int counters;
  273. counters = n_counters();
  274. if (counters == 0) {
  275. printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
  276. return -ENODEV;
  277. }
  278. #ifdef CONFIG_MIPS_MT_SMP
  279. cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
  280. if (!cpu_has_mipsmt_pertccounters)
  281. counters = counters_total_to_per_cpu(counters);
  282. #endif
  283. on_each_cpu(reset_counters, (void *)(long)counters, 1);
  284. op_model_mipsxx_ops.num_counters = counters;
  285. switch (current_cpu_type()) {
  286. case CPU_M14KC:
  287. op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
  288. break;
  289. case CPU_M14KEC:
  290. op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
  291. break;
  292. case CPU_20KC:
  293. op_model_mipsxx_ops.cpu_type = "mips/20K";
  294. break;
  295. case CPU_24K:
  296. op_model_mipsxx_ops.cpu_type = "mips/24K";
  297. break;
  298. case CPU_25KF:
  299. op_model_mipsxx_ops.cpu_type = "mips/25K";
  300. break;
  301. case CPU_1004K:
  302. case CPU_34K:
  303. op_model_mipsxx_ops.cpu_type = "mips/34K";
  304. break;
  305. case CPU_1074K:
  306. case CPU_74K:
  307. op_model_mipsxx_ops.cpu_type = "mips/74K";
  308. break;
  309. case CPU_INTERAPTIV:
  310. op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
  311. break;
  312. case CPU_PROAPTIV:
  313. op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
  314. break;
  315. case CPU_P5600:
  316. op_model_mipsxx_ops.cpu_type = "mips/P5600";
  317. break;
  318. case CPU_I6400:
  319. op_model_mipsxx_ops.cpu_type = "mips/I6400";
  320. break;
  321. case CPU_M5150:
  322. op_model_mipsxx_ops.cpu_type = "mips/M5150";
  323. break;
  324. case CPU_5KC:
  325. op_model_mipsxx_ops.cpu_type = "mips/5K";
  326. break;
  327. case CPU_R10000:
  328. if ((current_cpu_data.processor_id & 0xff) == 0x20)
  329. op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
  330. else
  331. op_model_mipsxx_ops.cpu_type = "mips/r10000";
  332. break;
  333. case CPU_R12000:
  334. case CPU_R14000:
  335. op_model_mipsxx_ops.cpu_type = "mips/r12000";
  336. break;
  337. case CPU_R16000:
  338. op_model_mipsxx_ops.cpu_type = "mips/r16000";
  339. break;
  340. case CPU_SB1:
  341. case CPU_SB1A:
  342. op_model_mipsxx_ops.cpu_type = "mips/sb1";
  343. break;
  344. case CPU_LOONGSON1:
  345. op_model_mipsxx_ops.cpu_type = "mips/loongson1";
  346. break;
  347. case CPU_XLR:
  348. op_model_mipsxx_ops.cpu_type = "mips/xlr";
  349. break;
  350. default:
  351. printk(KERN_ERR "Profiling unsupported for this CPU\n");
  352. return -ENODEV;
  353. }
  354. save_perf_irq = perf_irq;
  355. perf_irq = mipsxx_perfcount_handler;
  356. if (get_c0_perfcount_int)
  357. perfcount_irq = get_c0_perfcount_int();
  358. else if (cp0_perfcount_irq >= 0)
  359. perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
  360. else
  361. perfcount_irq = -1;
  362. if (perfcount_irq >= 0)
  363. return request_irq(perfcount_irq, mipsxx_perfcount_int,
  364. IRQF_PERCPU | IRQF_NOBALANCING |
  365. IRQF_NO_THREAD | IRQF_NO_SUSPEND |
  366. IRQF_SHARED,
  367. "Perfcounter", save_perf_irq);
  368. return 0;
  369. }
  370. static void mipsxx_exit(void)
  371. {
  372. int counters = op_model_mipsxx_ops.num_counters;
  373. if (perfcount_irq >= 0)
  374. free_irq(perfcount_irq, save_perf_irq);
  375. counters = counters_per_cpu_to_total(counters);
  376. on_each_cpu(reset_counters, (void *)(long)counters, 1);
  377. perf_irq = save_perf_irq;
  378. }
/* Exported model ops; num_counters and cpu_type are set by mipsxx_init(). */
struct op_mips_model op_model_mipsxx_ops = {
	.reg_setup	= mipsxx_reg_setup,
	.cpu_setup	= mipsxx_cpu_setup,
	.init		= mipsxx_init,
	.exit		= mipsxx_exit,
	.cpu_start	= mipsxx_cpu_start,
	.cpu_stop	= mipsxx_cpu_stop,
};