@@ -64,17 +64,11 @@ struct mips_perf_event {
 	#define CNTR_EVEN	0x55555555
 	#define CNTR_ODD	0xaaaaaaaa
 	#define CNTR_ALL	0xffffffff
-#ifdef CONFIG_MIPS_MT_SMP
 	enum {
 		T = 0,
 		V = 1,
 		P = 2,
 	} range;
-#else
-	#define T
-	#define V
-	#define P
-#endif
 };
 
 static struct mips_perf_event raw_event;
@@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 {
 	struct perf_event *event = container_of(evt, struct perf_event, hw);
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-#ifdef CONFIG_MIPS_MT_SMP
 	unsigned int range = evt->event_base >> 24;
-#endif /* CONFIG_MIPS_MT_SMP */
 
 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
 
@@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 		/* Make sure interrupt enabled. */
 		MIPS_PERFCTRL_IE;
 
-#ifdef CONFIG_CPU_BMIPS5000
-	{
+	if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
 		/* enable the counter for the calling thread */
 		cpuc->saved_ctrl[idx] |=
 			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
-	}
-#else
-#ifdef CONFIG_MIPS_MT_SMP
-	if (range > V) {
+	} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
 		/* The counter is processor wide. Set it up to count all TCs. */
 		pr_debug("Enabling perf counter for all TCs\n");
 		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
-	} else
-#endif /* CONFIG_MIPS_MT_SMP */
-	{
+	} else {
 		unsigned int cpu, ctrl;
 
 		/*
@@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 		cpuc->saved_ctrl[idx] |= ctrl;
 		pr_debug("Enabling perf counter for CPU%d\n", cpu);
 	}
-#endif /* CONFIG_CPU_BMIPS5000 */
 	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
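
For context, the IS_ENABLED() form used above works because the macro expands to a constant 0 or 1, so the compiler drops the untaken branch while still parsing and type-checking it. That is also why the first hunk keeps the T/V/P enum in every configuration instead of the empty #define fallbacks: the identifiers referenced in the now always-compiled branches must exist even when CONFIG_MIPS_MT_SMP is disabled. Below is a minimal userspace sketch of the same pattern; the CONFIG_MT_SMP_ENABLED macro and enable_event() function are stand-ins for illustration, not kernel code.

#include <stdio.h>

#define CONFIG_MT_SMP_ENABLED 0		/* stand-in for a Kconfig option */

enum { T = 0, V = 1, P = 2 };		/* always defined, as in the new struct */

static void enable_event(unsigned int range)
{
	/* Constant condition: the dead branch is eliminated, yet still compiled. */
	if (CONFIG_MT_SMP_ENABLED && range > V)
		printf("counter counts all TCs\n");
	else
		printf("counter counts the current CPU only\n");
}

int main(void)
{
	enable_event(P);
	return 0;
}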