perf_event_server.h

/*
 * Performance event support - PowerPC classic/server specific definitions.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <asm/hw_irq.h>
#include <linux/device.h>
#include <uapi/asm/perf_event.h>

/* Update perf_event_print_debug() if this changes */
#define MAX_HWEVENTS		8
#define MAX_EVENT_ALTERNATIVES	8
#define MAX_LIMITED_HWCOUNTERS	2

struct perf_event;

/*
 * This struct provides the constants and functions needed to
 * describe the PMU on a particular POWER-family CPU.
 */
struct power_pmu {
	const char	*name;
	int		n_counter;
	int		max_alternatives;
	unsigned long	add_fields;
	unsigned long	test_adder;
	int		(*compute_mmcr)(u64 events[], int n_ev,
				unsigned int hwc[], unsigned long mmcr[],
				struct perf_event *pevents[]);
	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
				unsigned long *valp);
	int		(*get_alternatives)(u64 event_id, unsigned int flags,
				u64 alt[]);
	void		(*get_mem_data_src)(union perf_mem_data_src *dsrc,
				u32 flags, struct pt_regs *regs);
	u64		(*bhrb_filter_map)(u64 branch_sample_type);
	void		(*config_bhrb)(u64 pmu_bhrb_filter);
	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
	int		(*limited_pmc_event)(u64 event_id);
	u32		flags;
	const struct attribute_group	**attr_groups;
	int		n_generic;
	int		*generic_events;
	int		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
			       [PERF_COUNT_HW_CACHE_OP_MAX]
			       [PERF_COUNT_HW_CACHE_RESULT_MAX];

	/* BHRB entries in the PMU */
	int		bhrb_nr;
};
/*
 * Values for power_pmu.flags
 */
#define PPMU_LIMITED_PMC5_6	0x00000001 /* PMC5/6 have limited function */
#define PPMU_ALT_SIPR		0x00000002 /* uses alternate posn for SIPR/HV */
#define PPMU_NO_SIPR		0x00000004 /* no SIPR/HV in MMCRA at all */
#define PPMU_NO_CONT_SAMPLING	0x00000008 /* no continuous sampling */
#define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
#define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER		0x00000040 /* Has SIER */
#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
#define PPMU_NO_SIAR		0x00000100 /* Do not use SIAR */

/*
 * Values for flags to get_alternatives()
 */
#define PPMU_LIMITED_PMC_OK	1	/* can put this on a limited PMC */
#define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */

extern int register_power_pmu(struct power_pmu *);
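
/*
 * Example (illustrative only, not part of this header): a minimal sketch of
 * how a CPU-specific perf driver (a *-pmu.c file under arch/powerpc/perf/)
 * might fill in struct power_pmu and hand it to register_power_pmu().  All
 * names, event codes and values below are hypothetical; a real driver such
 * as power8-pmu.c supplies its own callbacks and tables.
 */
#if 0	/* illustration, not compiled */
static int mycpu_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	= 0x1e,	/* hypothetical event code */
	[PERF_COUNT_HW_INSTRUCTIONS]	= 0x02,	/* hypothetical event code */
};

static struct power_pmu mycpu_pmu = {
	.name			= "MYCPU",
	.n_counter		= 6,	/* hypothetical number of PMCs */
	.max_alternatives	= MAX_EVENT_ALTERNATIVES,
	.compute_mmcr		= mycpu_compute_mmcr,	/* hypothetical helpers */
	.get_constraint		= mycpu_get_constraint,
	.get_alternatives	= mycpu_get_alternatives,
	.disable_pmc		= mycpu_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(mycpu_generic_events),
	.generic_events		= mycpu_generic_events,
	.bhrb_nr		= 32,	/* hypothetical BHRB depth */
};

static int __init init_mycpu_pmu(void)
{
	return register_power_pmu(&mycpu_pmu);
}
early_initcall(init_mycpu_pmu);
#endif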
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long int read_bhrb(int n);

/*
 * Only override the default definitions in include/linux/perf_event.h
 * if we have hardware PMU support.
 */
#ifdef CONFIG_PPC_PERF_CTRS
#define perf_misc_flags(regs)	perf_misc_flags(regs)
#endif
/*
 * The power_pmu.get_constraint function returns a 32/64-bit value and
 * a 32/64-bit mask that express the constraints between this event_id and
 * other events.
 *
 * The value and mask are divided up into (non-overlapping) bitfields
 * of three different types:
 *
 * Select field: this expresses the constraint that some set of bits
 * in MMCR* needs to be set to a specific value for this event_id.  For a
 * select field, the mask contains 1s in every bit of the field, and
 * the value contains a unique value for each possible setting of the
 * MMCR* bits.  The constraint checking code will ensure that two events
 * that set the same field in their masks have the same value in their
 * value dwords.
 *
 * Add field: this expresses the constraint that there can be at most
 * N events in a particular class.  A field of k bits can be used for
 * N <= 2^(k-1) - 1.  The mask has the most significant bit of the field
 * set (and the other bits 0), and the value has only the least significant
 * bit of the field set.  In addition, the 'add_fields' and 'test_adder'
 * in the struct power_pmu for this processor come into play.  The
 * add_fields value contains 1 in the LSB of the field, and the
 * test_adder contains 2^(k-1) - 1 - N in the field.
 *
 * NAND field: this expresses the constraint that you may not have events
 * in all of a set of classes.  (For example, on PPC970, you can't select
 * events from the FPU, ISU and IDU simultaneously, although any two are
 * possible.)  For N classes, the field is N+1 bits wide, and each class
 * is assigned one bit from the least-significant N bits.  The mask has
 * only the most-significant bit set, and the value has only the bit
 * for the event_id's class set.  The test_adder has the least significant
 * bit set in the field.
 *
 * If an event_id is not subject to the constraint expressed by a particular
 * field, then it will have 0 in both the mask and value for that field.
 */
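
/*
 * Worked example (illustrative only; the field positions and values are
 * hypothetical, real layouts are defined per CPU in its *-pmu.c file):
 * suppose bits 0-2 of the constraint form an add field limiting a class to
 * at most N = 2 events (k = 3, so 2^(k-1) - 1 = 3 >= N), and bits 4-7 form
 * a select field tied to some MMCR1 unit selector.  A get_constraint
 * implementation could encode an event in that class which also needs the
 * unit selector set to 5 as follows:
 */
#if 0	/* illustration, not compiled */
static int mycpu_get_constraint(u64 event_id, unsigned long *mskp,
				unsigned long *valp)
{
	unsigned long mask = 0, value = 0;

	/* Add field in bits 0-2: MSB of the field in the mask, LSB in the value */
	mask  |= 0x4;	/* bit 2: most-significant bit of the field */
	value |= 0x1;	/* bit 0: this event contributes one unit */

	/*
	 * Select field in bits 4-7: all field bits in the mask, the required
	 * MMCR setting (5 here) in the value.
	 */
	mask  |= 0xf0;
	value |= 0x50;

	*mskp = mask;
	*valp = value;
	return 0;
}
#endif
/*
 * The corresponding struct power_pmu would then have bit 0 set in add_fields
 * (the LSB of the add field) and 2^(k-1) - 1 - N = 1 in test_adder for that
 * field, so that summing more than two such events carries into the mask bit
 * and the combination is rejected.
 */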
extern ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page);

/*
 * EVENT_VAR() is the same as PMU_EVENT_VAR with a suffix.
 *
 * Having a suffix allows us to have aliases in sysfs - e.g. the generic
 * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
 * 'PM_CYC', where the latter is the name by which the event is known in
 * the POWER CPU specification.
 *
 * Similarly, some hardware and cache events use the same event code.  E.g.
 * on POWER8, both "cache-references" and "L1-dcache-loads" events refer
 * to the same event, PM_LD_REF_L1.  The suffix allows us to have two
 * sysfs objects for the same event and thus two entries/aliases in sysfs.
 */
#define EVENT_VAR(_id, _suffix)		event_attr_##_id##_suffix
#define EVENT_PTR(_id, _suffix)		&EVENT_VAR(_id, _suffix).attr.attr

#define EVENT_ATTR(_name, _id, _suffix)					\
	PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), _id,		\
			power_events_sysfs_show)

#define	GENERIC_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _g)
#define	GENERIC_EVENT_PTR(_id)		EVENT_PTR(_id, _g)

#define	CACHE_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _c)
#define	CACHE_EVENT_PTR(_id)		EVENT_PTR(_id, _c)

#define	POWER_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _p)
#define	POWER_EVENT_PTR(_id)		EVENT_PTR(_id, _p)
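
/*
 * Example (illustrative only): how a CPU-specific driver might use the
 * macros above to expose the same event under both a generic name and its
 * POWER mnemonic, giving two sysfs aliases.  PM_CYC is the mnemonic named
 * in the comment above; the event code value and the "mycpu" identifiers
 * are hypothetical.
 */
#if 0	/* illustration, not compiled */
#define PM_CYC				0x1e	/* hypothetical event code */

GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);		/* sysfs entry "cpu-cycles" */
POWER_EVENT_ATTR(PM_CYC, PM_CYC);		/* sysfs alias "PM_CYC" */

static struct attribute *mycpu_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	POWER_EVENT_PTR(PM_CYC),
	NULL
};

/* Referenced from the driver's power_pmu.attr_groups list */
static struct attribute_group mycpu_pmu_events_group = {
	.name	= "events",
	.attrs	= mycpu_events_attr,
};
#endif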