@@ -36,7 +36,12 @@ struct cpu_hw_events {
 	struct perf_event *event[MAX_HWEVENTS];
 	u64 events[MAX_HWEVENTS];
 	unsigned int flags[MAX_HWEVENTS];
-	unsigned long mmcr[3];
+	/*
+	 * The order of the MMCR array is:
+	 *  - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2
+	 *  - 32-bit, MMCR0, MMCR1, MMCR2
+	 */
+	unsigned long mmcr[4];
 	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
 	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
 	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
@@ -112,9 +117,9 @@ static bool is_ebb_event(struct perf_event *event) { return false; }
 static int ebb_event_check(struct perf_event *event) { return 0; }
 static void ebb_event_add(struct perf_event *event) { }
 static void ebb_switch_out(unsigned long mmcr0) { }
-static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
 {
-	return mmcr0;
+	return cpuhw->mmcr[0];
 }
 
 static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
@@ -542,8 +547,10 @@ static void ebb_switch_out(unsigned long mmcr0)
 	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
 }
 
-static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
 {
+	unsigned long mmcr0 = cpuhw->mmcr[0];
+
 	if (!ebb)
 		goto out;
 
@@ -568,7 +575,15 @@ static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
 	mtspr(SPRN_SIAR, current->thread.siar);
 	mtspr(SPRN_SIER, current->thread.sier);
 	mtspr(SPRN_SDAR, current->thread.sdar);
-	mtspr(SPRN_MMCR2, current->thread.mmcr2);
+
+	/*
+	 * Merge the kernel & user values of MMCR2. The semantics we implement
+	 * are that the user MMCR2 can set bits, ie. cause counters to freeze,
+	 * but not clear bits. If a task wants to be able to clear bits, ie.
+	 * unfreeze counters, it should not set exclude_xxx in its events and
+	 * instead manage the MMCR2 entirely by itself.
+	 */
+	mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2);
 out:
 	return mmcr0;
 }
@@ -915,6 +930,14 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 	int i, n, first;
 	struct perf_event *event;
 
+	/*
+	 * If the PMU we're on supports per event exclude settings then we
+	 * don't need to do any of this logic. NB. This assumes no PMU has both
+	 * per event exclude and limited PMCs.
+	 */
+	if (ppmu->flags & PPMU_ARCH_207S)
+		return 0;
+
 	n = n_prev + n_new;
 	if (n <= 1)
 		return 0;
@@ -1230,19 +1253,20 @@ static void power_pmu_enable(struct pmu *pmu)
 		goto out;
 	}
 
-	/*
-	 * Add in MMCR0 freeze bits corresponding to the
-	 * attr.exclude_* bits for the first event.
-	 * We have already checked that all events have the
-	 * same values for these bits as the first event.
-	 */
-	event = cpuhw->event[0];
-	if (event->attr.exclude_user)
-		cpuhw->mmcr[0] |= MMCR0_FCP;
-	if (event->attr.exclude_kernel)
-		cpuhw->mmcr[0] |= freeze_events_kernel;
-	if (event->attr.exclude_hv)
-		cpuhw->mmcr[0] |= MMCR0_FCHV;
+	if (!(ppmu->flags & PPMU_ARCH_207S)) {
+		/*
+		 * Add in MMCR0 freeze bits corresponding to the attr.exclude_*
+		 * bits for the first event. We have already checked that all
+		 * events have the same value for these bits as the first event.
+		 */
+		event = cpuhw->event[0];
+		if (event->attr.exclude_user)
+			cpuhw->mmcr[0] |= MMCR0_FCP;
+		if (event->attr.exclude_kernel)
+			cpuhw->mmcr[0] |= freeze_events_kernel;
+		if (event->attr.exclude_hv)
+			cpuhw->mmcr[0] |= MMCR0_FCHV;
+	}
 
 	/*
 	 * Write the new configuration to MMCR* with the freeze
@@ -1254,6 +1278,8 @@ static void power_pmu_enable(struct pmu *pmu)
 	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
 				| MMCR0_FC);
+	if (ppmu->flags & PPMU_ARCH_207S)
+		mtspr(SPRN_MMCR2, cpuhw->mmcr[3]);
 
 	/*
 	 * Read off any pre-existing events that need to move
@@ -1309,10 +1335,7 @@ static void power_pmu_enable(struct pmu *pmu)
  out_enable:
 	pmao_restore_workaround(ebb);
 
-	if (ppmu->flags & PPMU_ARCH_207S)
-		mtspr(SPRN_MMCR2, 0);
-
-	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
+	mmcr0 = ebb_switch_in(ebb, cpuhw);
 
 	mb();
 	if (cpuhw->bhrb_users)
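
Note (not part of the patch): the cpuhw->mmcr[3] | current->thread.mmcr2 merge in the EBB hunk is what gives the "user MMCR2 can set freeze bits but never clear them" behaviour described in the comment. A minimal standalone sketch of that semantic, using a hypothetical helper name purely for illustration:

/*
 * Hypothetical helper, not from the patch: combine the kernel-programmed
 * MMCR2 (per-event exclude freeze bits, cpuhw->mmcr[3]) with the task's
 * saved MMCR2. OR-ing means user bits can only add freezes on top of the
 * kernel's, never clear the bits the kernel set.
 */
static inline unsigned long merge_mmcr2(unsigned long kernel_mmcr2,
					unsigned long user_mmcr2)
{
	return kernel_mmcr2 | user_mmcr2;
}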
|