@@ -57,26 +57,7 @@ struct arc_reg_cc_build {
 #define PERF_COUNT_ARC_HW_MAX	(PERF_COUNT_HW_MAX + 6)
 
 /*
- * The "generalized" performance events seem to really be a copy
|
|
|
|
- * of the available events on x86 processors; the mapping to ARC
|
|
|
|
- * events is not always possible 1-to-1. Fortunately, there doesn't
|
|
|
|
- * seem to be an exact definition for these events, so we can cheat
|
|
|
|
- * a bit where necessary.
|
|
|
|
- *
|
|
|
|
- * In particular, the following PERF events may behave a bit differently
|
|
|
|
- * compared to other architectures:
|
|
|
|
- *
|
|
|
|
- * PERF_COUNT_HW_CPU_CYCLES
|
|
|
|
- * Cycles not in halted state
|
|
|
|
- *
|
|
|
|
- * PERF_COUNT_HW_REF_CPU_CYCLES
|
|
|
|
- * Reference cycles not in halted state, same as PERF_COUNT_HW_CPU_CYCLES
|
|
|
|
- * for now as we don't do Dynamic Voltage/Frequency Scaling (yet)
|
|
|
|
- *
|
|
|
|
- * PERF_COUNT_HW_BUS_CYCLES
|
|
|
|
- * Unclear what this means, Intel uses 0x013c, which according to
|
|
|
|
- * their datasheet means "unhalted reference cycles". It sounds similar
|
|
|
|
- * to PERF_COUNT_HW_REF_CPU_CYCLES, and we use the same counter for it.
|
|
|
|
|
|
+ * Some ARC PCT quirks:
  *
  * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
  * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
@@ -91,21 +72,35 @@ struct arc_reg_cc_build {
  * Note that I$ cache misses aren't counted by either of the two!
  */
 
+/*
+ * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
+ * (based on a specific RTL build).
+ * Below is the static map between the generic/ARC-specific perf event_id
+ * and the h/w condition names.
+ * At probe time, we loop through each index and look up its name to
+ * complete the mapping of perf event_id to h/w index, as the latter is
+ * needed to actually program the counter.
+ */
 static const char * const arc_pmu_ev_hw_map[] = {
+	/* count cycles */
 	[PERF_COUNT_HW_CPU_CYCLES] = "crun",
 	[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
 	[PERF_COUNT_HW_BUS_CYCLES] = "crun",
-	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+
 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
-	[PERF_COUNT_ARC_DCLM] = "dclm",
-	[PERF_COUNT_ARC_DCSM] = "dcsm",
-	[PERF_COUNT_ARC_ICM] = "icm",
-	[PERF_COUNT_ARC_BPOK] = "bpok",
-	[PERF_COUNT_ARC_EDTLB] = "edtlb",
-	[PERF_COUNT_ARC_EITLB] = "eitlb",
+
+	/* count conditions */
+	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+	[PERF_COUNT_ARC_BPOK] = "bpok",		/* NP-NT, PT-T, PNT-NT */
+	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",	/* NP-T, PT-NT, PNT-T */
+
+	[PERF_COUNT_ARC_DCLM] = "dclm",		/* D-cache Load Miss */
+	[PERF_COUNT_ARC_DCSM] = "dcsm",		/* D-cache Store Miss */
+	[PERF_COUNT_ARC_ICM] = "icm",		/* I-cache Miss */
+	[PERF_COUNT_ARC_EDTLB] = "edtlb",	/* D-TLB Miss */
+	[PERF_COUNT_ARC_EITLB] = "eitlb",	/* I-TLB Miss */
 };
 
 #define C(_x)	PERF_COUNT_HW_CACHE_##_x
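
For context, the probe-time discovery that the new comment block describes could look roughly like the sketch below. This is illustrative only, not part of the patch: the helper name arc_pmu_map_hw_events, the n_conditions parameter, and the ev_hw_idx[] field on struct arc_pmu are assumptions; the aux-register accessors (write_aux_reg/read_aux_reg) and the ARC_REG_CC_INDEX / ARC_REG_CC_NAME0/1 names follow the existing ARC port conventions.

/*
 * Illustrative sketch (not part of this patch): walk the h/w condition
 * indexes at probe time and match each condition's 8-char name against
 * the static map above. Helper name and ev_hw_idx[] field are assumed.
 */
static void arc_pmu_map_hw_events(struct arc_pmu *arc_pmu, int n_conditions)
{
	union cc_name {
		struct {
			u32 word0, word1;
			char sentinel;
		} indiv;
		char str[9];
	} cc_name;
	int i, j;

	cc_name.str[8] = 0;	/* names are at most 8 chars, NUL-terminate */

	for (j = 0; j < n_conditions; j++) {
		/* select condition 'j' and read back its name */
		write_aux_reg(ARC_REG_CC_INDEX, j);
		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);

		/* record this build's h/w index for every event_id using the name */
		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
			if (arc_pmu_ev_hw_map[i] &&
			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str))
				arc_pmu->ev_hw_idx[i] = j;
		}
	}
}

Keying the static map by condition name rather than index is what keeps the table valid across RTL builds; only the per-build name-to-index discovery has to happen at runtime.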